//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//
#include "TargetInfo.h"
#include "ABIInfo.h"
#include "CGCXXABI.h"
#include "CGValue.h"
#include "CodeGenFunction.h"
#include "clang/AST/RecordLayout.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm> // std::sort
using namespace clang;
using namespace CodeGen;

// Helper for coercing an aggregate argument or return value into an integer
// array of the same size (including padding) and alignment. This alternate
// coercion happens only for the RenderScript ABI and can be removed after
// runtimes that rely on it are no longer supported.
//
// RenderScript assumes that the size of the argument / return value in the IR
// is the same as the size of the corresponding qualified type. This helper
// coerces the aggregate type into an array of the same size (including
// padding). This coercion is used in lieu of expansion of struct members or
// other canonical coercions that return a coerced-type of larger size.
//
// Ty - The argument / return value type
// Context - The associated ASTContext
// LLVMContext - The associated LLVMContext
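//
// For example, a type with Size = 96 bits and Alignment = 32 bits is coerced
// to the IR type [3 x i32].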
static ABIArgInfo coerceToIntArray(QualType Ty,
                                   ASTContext &Context,
                                   llvm::LLVMContext &LLVMContext) {
  // Alignment and Size are measured in bits.
  const uint64_t Size = Context.getTypeSize(Ty);
  const uint64_t Alignment = Context.getTypeAlign(Ty);
  llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Alignment);
  const uint64_t NumElements = (Size + Alignment - 1) / Alignment;
  return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements));
}

static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
                               llvm::Value *Array,
                               llvm::Value *Value,
                               unsigned FirstIndex,
                               unsigned LastIndex) {
  // Alternatively, we could emit this as a loop in the source.
  for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
    llvm::Value *Cell =
        Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I);
    Builder.CreateAlignedStore(Value, Cell, CharUnits::One());
  }
}

static bool isAggregateTypeForABI(QualType T) {
  return !CodeGenFunction::hasScalarEvaluationKind(T) ||
         T->isMemberFunctionPointerType();
}

ABIArgInfo
ABIInfo::getNaturalAlignIndirect(QualType Ty, bool ByRef, bool Realign,
                                 llvm::Type *Padding) const {
  return ABIArgInfo::getIndirect(getContext().getTypeAlignInChars(Ty),
                                 ByRef, Realign, Padding);
}

ABIArgInfo
ABIInfo::getNaturalAlignIndirectInReg(QualType Ty, bool Realign) const {
  return ABIArgInfo::getIndirectInReg(getContext().getTypeAlignInChars(Ty),
                                      /*ByRef*/ false, Realign);
}

Address ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                             QualType Ty) const {
  return Address::invalid();
}

ABIInfo::~ABIInfo() {}

/// Does the given lowering require more than the given number of
/// registers when expanded?
///
/// This is intended to be the basis of a reasonable basic implementation
/// of should{Pass,Return}IndirectlyForSwift.
///
/// For most targets, a limit of four total registers is reasonable; this
/// limits the amount of code required in order to move around the value
/// in case it wasn't produced immediately prior to the call by the caller
/// (or wasn't produced in exactly the right registers) or isn't used
/// immediately within the callee. But some targets may need to further
/// limit the register count due to an inability to support that many
/// return registers.
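///
/// For example, with a limit of four registers on a 64-bit target, the
/// scalar sequence { i128, i128, float } needs four integer registers plus
/// one fp register, and so occupies more than the limit.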
static bool occupiesMoreThan(CodeGenTypes &cgt,
                             ArrayRef<llvm::Type*> scalarTypes,
                             unsigned maxAllRegisters) {
  unsigned intCount = 0, fpCount = 0;
  for (llvm::Type *type : scalarTypes) {
    if (type->isPointerTy()) {
      intCount++;
    } else if (auto intTy = dyn_cast<llvm::IntegerType>(type)) {
      auto ptrWidth = cgt.getTarget().getPointerWidth(0);
      intCount += (intTy->getBitWidth() + ptrWidth - 1) / ptrWidth;
    } else {
      assert(type->isVectorTy() || type->isFloatingPointTy());
      fpCount++;
    }
  }

  return (intCount + fpCount > maxAllRegisters);
}

bool SwiftABIInfo::isLegalVectorTypeForSwift(CharUnits vectorSize,
                                             llvm::Type *eltTy,
                                             unsigned numElts) const {
  // The default implementation of this assumes that the target guarantees
  // 128-bit SIMD support but nothing more.
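  // For example, a 16-byte <4 x float> is legal, while an 8-byte <2 x float>
  // or a 32-byte <8 x float> is not.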
  return (vectorSize.getQuantity() > 8 && vectorSize.getQuantity() <= 16);
}

static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT,
                                              CGCXXABI &CXXABI) {
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!RD)
    return CGCXXABI::RAA_Default;
  return CXXABI.getRecordArgABI(RD);
}

static CGCXXABI::RecordArgABI getRecordArgABI(QualType T,
                                              CGCXXABI &CXXABI) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return CGCXXABI::RAA_Default;
  return getRecordArgABI(RT, CXXABI);
}

/// Pass transparent unions as if they were the type of the first element. Sema
/// should ensure that all elements of the union have the same "machine type".
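///
/// For example, a union of 'int' and 'unsigned int' marked with
/// __attribute__((transparent_union)) is passed as its first field, an 'int'.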
static QualType useFirstFieldIfTransparentUnion(QualType Ty) {
  if (const RecordType *UT = Ty->getAsUnionType()) {
    const RecordDecl *UD = UT->getDecl();
    if (UD->hasAttr<TransparentUnionAttr>()) {
      assert(!UD->field_empty() && "sema created an empty transparent union");
      return UD->field_begin()->getType();
    }
  }
  return Ty;
}

CGCXXABI &ABIInfo::getCXXABI() const {
  return CGT.getCXXABI();
}

ASTContext &ABIInfo::getContext() const {
  return CGT.getContext();
}

llvm::LLVMContext &ABIInfo::getVMContext() const {
  return CGT.getLLVMContext();
}

const llvm::DataLayout &ABIInfo::getDataLayout() const {
  return CGT.getDataLayout();
}

const TargetInfo &ABIInfo::getTarget() const {
  return CGT.getTarget();
}

bool ABIInfo::isAndroid() const { return getTarget().getTriple().isAndroid(); }

bool ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  return false;
}

bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                uint64_t Members) const {
  return false;
}

bool ABIInfo::shouldSignExtUnsignedType(QualType Ty) const {
  return false;
}

LLVM_DUMP_METHOD void ABIArgInfo::dump() const {
  raw_ostream &OS = llvm::errs();
  OS << "(ABIArgInfo Kind=";
  switch (TheKind) {
  case Direct:
    OS << "Direct Type=";
    if (llvm::Type *Ty = getCoerceToType())
      Ty->print(OS);
    else
      OS << "null";
    break;
  case Extend:   OS << "Extend";   break;
  case Ignore:   OS << "Ignore";   break;
  case InAlloca: OS << "InAlloca Offset=" << getInAllocaFieldIndex(); break;
  case Indirect:
    OS << "Indirect Align=" << getIndirectAlign().getQuantity()
       << " ByVal=" << getIndirectByVal()
       << " Realign=" << getIndirectRealign();
    break;
  case Expand:   OS << "Expand";   break;
  case CoerceAndExpand:
    OS << "CoerceAndExpand Type=";
    getCoerceAndExpandType()->print(OS);
    break;
  }
  OS << ")\n";
}

// Dynamically round a pointer up to a multiple of the given alignment.
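// For example, rounding the pointer value 0x1003 up to 8-byte alignment
// yields 0x1008, computed as (0x1003 + 7) & -8.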
static llvm::Value *emitRoundPointerUpToAlignment(CodeGenFunction &CGF,
                                                  llvm::Value *Ptr,
                                                  CharUnits Align) {
  llvm::Value *PtrAsInt = Ptr;
  // OverflowArgArea = (OverflowArgArea + Align - 1) & -Align;
  PtrAsInt = CGF.Builder.CreatePtrToInt(PtrAsInt, CGF.IntPtrTy);
  PtrAsInt = CGF.Builder.CreateAdd(PtrAsInt,
        llvm::ConstantInt::get(CGF.IntPtrTy, Align.getQuantity() - 1));
  PtrAsInt = CGF.Builder.CreateAnd(PtrAsInt,
        llvm::ConstantInt::get(CGF.IntPtrTy, -Align.getQuantity()));
  PtrAsInt = CGF.Builder.CreateIntToPtr(PtrAsInt,
                                        Ptr->getType(),
                                        Ptr->getName() + ".aligned");
  return PtrAsInt;
}

/// Emit va_arg for a platform using the common void* representation,
/// where arguments are simply emitted in an array of slots on the stack.
///
/// This version implements the core direct-value passing rules.
///
/// \param SlotSize - The size and alignment of a stack slot.
///   Each argument will be allocated to a multiple of this number of
///   slots, and all the slots will be aligned to this value.
/// \param AllowHigherAlign - The slot alignment is not a cap;
///   an argument type with an alignment greater than the slot size
///   will be emitted on a higher-alignment address, potentially
///   leaving one or more empty slots behind as padding. If this
///   is false, the returned address might be less-aligned than
///   DirectAlign.
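///
/// For example, with 4-byte slots a 6-byte argument occupies two slots, and
/// the va_list pointer is advanced by the slot-aligned size of 8 bytes.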
static Address emitVoidPtrDirectVAArg(CodeGenFunction &CGF,
                                      Address VAListAddr,
                                      llvm::Type *DirectTy,
                                      CharUnits DirectSize,
                                      CharUnits DirectAlign,
                                      CharUnits SlotSize,
                                      bool AllowHigherAlign) {
  // Cast the element type to i8* if necessary. Some platforms define
  // va_list as a struct containing an i8* instead of just an i8*.
  if (VAListAddr.getElementType() != CGF.Int8PtrTy)
    VAListAddr = CGF.Builder.CreateElementBitCast(VAListAddr, CGF.Int8PtrTy);

  llvm::Value *Ptr = CGF.Builder.CreateLoad(VAListAddr, "argp.cur");

  // If the CC aligns values higher than the slot size, do so if needed.
  Address Addr = Address::invalid();
  if (AllowHigherAlign && DirectAlign > SlotSize) {
    Addr = Address(emitRoundPointerUpToAlignment(CGF, Ptr, DirectAlign),
                   DirectAlign);
  } else {
    Addr = Address(Ptr, SlotSize);
  }

  // Advance the pointer past the argument, then store that back.
  CharUnits FullDirectSize = DirectSize.alignTo(SlotSize);
  llvm::Value *NextPtr =
      CGF.Builder.CreateConstInBoundsByteGEP(Addr.getPointer(), FullDirectSize,
                                             "argp.next");
  CGF.Builder.CreateStore(NextPtr, VAListAddr);

  // If the argument is smaller than a slot, and this is a big-endian
  // target, the argument will be right-adjusted in its slot.
  if (DirectSize < SlotSize && CGF.CGM.getDataLayout().isBigEndian() &&
      !DirectTy->isStructTy()) {
    Addr = CGF.Builder.CreateConstInBoundsByteGEP(Addr, SlotSize - DirectSize);
  }

  Addr = CGF.Builder.CreateElementBitCast(Addr, DirectTy);
  return Addr;
}

/// Emit va_arg for a platform using the common void* representation,
/// where arguments are simply emitted in an array of slots on the stack.
///
/// \param IsIndirect - Values of this type are passed indirectly.
/// \param ValueInfo - The size and alignment of this type, generally
///   computed with getContext().getTypeInfoInChars(ValueTy).
/// \param SlotSizeAndAlign - The size and alignment of a stack slot.
///   Each argument will be allocated to a multiple of this number of
///   slots, and all the slots will be aligned to this value.
/// \param AllowHigherAlign - The slot alignment is not a cap;
///   an argument type with an alignment greater than the slot size
///   will be emitted on a higher-alignment address, potentially
///   leaving one or more empty slots behind as padding.
static Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType ValueTy, bool IsIndirect,
                                std::pair<CharUnits, CharUnits> ValueInfo,
                                CharUnits SlotSizeAndAlign,
                                bool AllowHigherAlign) {
  // The size and alignment of the value that was passed directly.
  CharUnits DirectSize, DirectAlign;
  if (IsIndirect) {
    DirectSize = CGF.getPointerSize();
    DirectAlign = CGF.getPointerAlign();
  } else {
    DirectSize = ValueInfo.first;
    DirectAlign = ValueInfo.second;
  }

  // Cast the address we've calculated to the right type.
  llvm::Type *DirectTy = CGF.ConvertTypeForMem(ValueTy);
  if (IsIndirect)
    DirectTy = DirectTy->getPointerTo(0);

  Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, DirectTy,
                                        DirectSize, DirectAlign,
                                        SlotSizeAndAlign,
                                        AllowHigherAlign);

  if (IsIndirect)
    Addr = Address(CGF.Builder.CreateLoad(Addr), ValueInfo.second);

  return Addr;
}

static Address emitMergePHI(CodeGenFunction &CGF,
                            Address Addr1, llvm::BasicBlock *Block1,
                            Address Addr2, llvm::BasicBlock *Block2,
                            const llvm::Twine &Name = "") {
  assert(Addr1.getType() == Addr2.getType());
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(Addr1.getType(), 2, Name);
  PHI->addIncoming(Addr1.getPointer(), Block1);
  PHI->addIncoming(Addr2.getPointer(), Block2);
  CharUnits Align = std::min(Addr1.getAlignment(), Addr2.getAlignment());
  return Address(PHI, Align);
}

TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }

// If someone can figure out a general rule for this, that would be great.
// It's probably just doomed to be platform-dependent, though.
unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
  // Verified for:
  //   x86-64     FreeBSD, Linux, Darwin
  //   x86-32     FreeBSD, Linux, Darwin
  //   PowerPC    Linux, Darwin
  //   ARM        Darwin (*not* EABI)
  return 32;
}

bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args,
                                const FunctionNoProtoType *fnType) const {
  // The following conventions are known to require this to be false:
  //   x86_stdcall
  //   HiPE
  // For everything else, we just prefer false unless we opt out.
  return false;
}

void
TargetCodeGenInfo::getDependentLibraryOption(llvm::StringRef Lib,
                                             llvm::SmallString<24> &Opt) const {
  // This assumes the user is passing a library name like "rt" instead of a
  // filename like "librt.a/so", and that they don't care whether it's static
  // or dynamic.
  Opt = "-l";
  Opt += Lib;
}

unsigned TargetCodeGenInfo::getOpenCLKernelCallingConv() const {
  return llvm::CallingConv::C;
}

llvm::Constant *TargetCodeGenInfo::getNullPointer(const CodeGen::CodeGenModule &CGM,
    llvm::PointerType *T, QualType QT) const {
  return llvm::ConstantPointerNull::get(T);
}

llvm::Value *TargetCodeGenInfo::performAddrSpaceCast(
    CodeGen::CodeGenFunction &CGF, llvm::Value *Src, QualType SrcTy,
    QualType DestTy) const {
  // Since target may map different address spaces in AST to the same address
  // space, an address space conversion may end up as a bitcast.
  return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Src,
                                                         CGF.ConvertType(DestTy));
}

static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);

/// isEmptyField - Return true iff the field is "empty", that is, it
/// is an unnamed bit-field or an (array of) empty record(s).
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
                         bool AllowArrays) {
  if (FD->isUnnamedBitfield())
    return true;

  QualType FT = FD->getType();

  // Constant arrays of empty records count as empty, strip them off.
  // Constant arrays of zero length always count as empty.
  if (AllowArrays)
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize() == 0)
        return true;
      FT = AT->getElementType();
    }

  const RecordType *RT = FT->getAs<RecordType>();
  if (!RT)
    return false;

  // C++ record fields are never empty, at least in the Itanium ABI.
  //
  // FIXME: We should use a predicate for whether this behavior is true in the
  // current ABI.
  if (isa<CXXRecordDecl>(RT->getDecl()))
    return false;

  return isEmptyRecord(Context, FT, AllowArrays);
}

/// isEmptyRecord - Return true iff a structure contains only empty
/// fields. Note that a structure with a flexible array member is not
/// considered empty.
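///
/// For example, "struct { int : 0; }" is an empty record, as is a struct
/// whose only field is a constant array of such structs (when arrays are
/// allowed).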
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())
      if (!isEmptyRecord(Context, I.getType(), true))
        return false;

  for (const auto *I : RD->fields())
    if (!isEmptyField(Context, I, AllowArrays))
      return false;
  return true;
}

/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The field declaration for the single non-empty field, if
/// it exists.
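///
/// For example, "struct { struct { float f; } inner; }" is a single element
/// struct whose element type is 'float', while "struct { float f; int i; }"
/// is not.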
static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return nullptr;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return nullptr;

  const Type *Found = nullptr;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (const auto &I : CXXRD->bases()) {
      // Ignore empty records.
      if (isEmptyRecord(Context, I.getType(), true))
        continue;

      // If we already found an element then this isn't a single-element struct.
      if (Found)
        return nullptr;

      // If this is non-empty and not a single element struct, the composite
      // cannot be a single element struct.
      Found = isSingleElementStruct(I.getType(), Context);
      if (!Found)
        return nullptr;
    }
  }

  // Check for single element.
  for (const auto *FD : RD->fields()) {
    QualType FT = FD->getType();

    // Ignore empty fields.
    if (isEmptyField(Context, FD, true))
      continue;

    // If we already found an element then this isn't a single-element
    // struct.
    if (Found)
      return nullptr;

    // Treat single element arrays as the element.
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize().getZExtValue() != 1)
        break;
      FT = AT->getElementType();
    }

    if (!isAggregateTypeForABI(FT)) {
      Found = FT.getTypePtr();
    } else {
      Found = isSingleElementStruct(FT, Context);
      if (!Found)
        return nullptr;
    }
  }

  // We don't consider a struct a single-element struct if it has
  // padding beyond the element type.
  if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
    return nullptr;

  return Found;
}

static Address EmitVAArgInstr(CodeGenFunction &CGF, Address VAListAddr,
                              QualType Ty, const ABIArgInfo &AI) {
  // This default implementation defers to the llvm backend's va_arg
  // instruction. It can handle only passing arguments directly
  // (typically only handled in the backend for primitive types), or
  // aggregates passed indirectly by pointer (NOTE: if the "byval"
  // flag has ABI impact in the callee, this implementation cannot
  // work.)

  // Only a few cases are covered here at the moment -- those needed
  // by the default abi.
  llvm::Value *Val;

  if (AI.isIndirect()) {
    assert(!AI.getPaddingType() &&
           "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
    assert(!AI.getIndirectRealign() &&
           "Unexpected IndirectRealign seen in arginfo in generic VAArg emitter!");

    auto TyInfo = CGF.getContext().getTypeInfoInChars(Ty);
    CharUnits TyAlignForABI = TyInfo.second;

    llvm::Type *BaseTy =
        llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
    llvm::Value *Addr =
        CGF.Builder.CreateVAArg(VAListAddr.getPointer(), BaseTy);
    return Address(Addr, TyAlignForABI);
  } else {
    assert((AI.isDirect() || AI.isExtend()) &&
           "Unexpected ArgInfo Kind in generic VAArg emitter!");

    assert(!AI.getInReg() &&
           "Unexpected InReg seen in arginfo in generic VAArg emitter!");
    assert(!AI.getPaddingType() &&
           "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
    assert(!AI.getDirectOffset() &&
           "Unexpected DirectOffset seen in arginfo in generic VAArg emitter!");
    assert(!AI.getCoerceToType() &&
           "Unexpected CoerceToType seen in arginfo in generic VAArg emitter!");

    Address Temp = CGF.CreateMemTemp(Ty, "varet");
    Val = CGF.Builder.CreateVAArg(VAListAddr.getPointer(), CGF.ConvertType(Ty));
    CGF.Builder.CreateStore(Val, Temp);
    return Temp;
  }
}

/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
class DefaultABIInfo : public ABIInfo {
public:
  DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type);
  }

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override {
    return EmitVAArgInstr(CGF, VAListAddr, Ty, classifyArgumentType(Ty));
  }
};

class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
};

ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // passed by value.
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

    return getNaturalAlignIndirect(Ty);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy))
    return getNaturalAlignIndirect(RetTy);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

//===----------------------------------------------------------------------===//
// WebAssembly ABI Implementation
//
// This is a very simple ABI that relies a lot on DefaultABIInfo.
//===----------------------------------------------------------------------===//

class WebAssemblyABIInfo final : public DefaultABIInfo {
public:
  explicit WebAssemblyABIInfo(CodeGen::CodeGenTypes &CGT)
      : DefaultABIInfo(CGT) {}

private:
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  // DefaultABIInfo's classifyReturnType and classifyArgumentType are
  // non-virtual, but computeInfo and EmitVAArg are virtual, so we
  // overload them.
  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &Arg : FI.arguments())
      Arg.info = classifyArgumentType(Arg.type);
  }

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
};

class WebAssemblyTargetCodeGenInfo final : public TargetCodeGenInfo {
public:
  explicit WebAssemblyTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(new WebAssemblyABIInfo(CGT)) {}
};

/// \brief Classify argument of given type \p Ty.
ABIArgInfo WebAssemblyABIInfo::classifyArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // passed by value.
    if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();
    // Lower single-element structs to just pass a regular value. TODO: We
    // could do reasonable-size multiple-element structs too, using getExpand(),
    // though watch out for things like bitfields.
    if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
      return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
  }

  // Otherwise just do the default thing.
  return DefaultABIInfo::classifyArgumentType(Ty);
}

ABIArgInfo WebAssemblyABIInfo::classifyReturnType(QualType RetTy) const {
  if (isAggregateTypeForABI(RetTy)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // returned by value.
    if (!getRecordArgABI(RetTy, getCXXABI())) {
      // Ignore empty structs/unions.
      if (isEmptyRecord(getContext(), RetTy, true))
        return ABIArgInfo::getIgnore();
      // Lower single-element structs to just return a regular value. TODO: We
      // could do reasonable-size multiple-element structs too, using
      // ABIArgInfo::getDirect().
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
    }
  }

  // Otherwise just do the default thing.
  return DefaultABIInfo::classifyReturnType(RetTy);
}

Address WebAssemblyABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                      QualType Ty) const {
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect=*/ false,
                          getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(4),
                          /*AllowHigherAlign=*/ true);
}

//===----------------------------------------------------------------------===//
// le32/PNaCl bitcode ABI Implementation
//
// This is a simplified version of the x86_32 ABI. Arguments and return values
// are always passed on the stack.
//===----------------------------------------------------------------------===//

class PNaClABIInfo : public ABIInfo {
public:
  PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override;
  Address EmitVAArg(CodeGenFunction &CGF,
                    Address VAListAddr, QualType Ty) const override;
};

class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(new PNaClABIInfo(CGT)) {}
};

void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type);
}

Address PNaClABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty) const {
  // The PNaCl ABI is a bit odd, in that varargs don't use normal
  // function classification. Structs get passed directly for varargs
  // functions, through a rewriting transform in
  // pnacl-llvm/lib/Transforms/NaCl/ExpandVarArgs.cpp, which allows
  // this target to actually support va_arg instructions with an
  // aggregate type, unlike other targets.
  return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());
}

/// \brief Classify argument of given type \p Ty.
ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty)) {
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
    return getNaturalAlignIndirect(Ty);
  } else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
    // Treat an enum type as its underlying type.
    Ty = EnumTy->getDecl()->getIntegerType();
  } else if (Ty->isFloatingType()) {
    // Floating-point types don't go inreg.
    return ABIArgInfo::getDirect();
  }

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // In the PNaCl ABI we always return records/structures on the stack.
  if (isAggregateTypeForABI(RetTy))
    return getNaturalAlignIndirect(RetTy);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

/// IsX86_MMXType - Return true if this is an MMX type.
bool IsX86_MMXType(llvm::Type *IRType) {
  // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>.
  return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
    cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
    IRType->getScalarSizeInBits() != 64;
}

static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                          StringRef Constraint,
                                          llvm::Type* Ty) {
  if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy()) {
    if (cast<llvm::VectorType>(Ty)->getBitWidth() != 64) {
      // Invalid MMX constraint
      return nullptr;
    }

    return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
  }

  // No operation needed
  return Ty;
}

/// Returns true if this type can be passed in SSE registers with the
/// X86_VectorCall calling convention. Shared between x86_32 and x86_64.
static bool isX86VectorTypeForVectorCall(ASTContext &Context, QualType Ty) {
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half)
      return true;
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // vectorcall can pass XMM, YMM, and ZMM vectors. We don't pass SSE1 MMX
    // registers specially.
    unsigned VecSize = Context.getTypeSize(VT);
    if (VecSize == 128 || VecSize == 256 || VecSize == 512)
      return true;
  }
  return false;
}

/// Returns true if this aggregate is small enough to be passed in SSE registers
/// in the X86_VectorCall calling convention. Shared between x86_32 and x86_64.
static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
  return NumMembers <= 4;
}

/// Returns a Homogeneous Vector Aggregate ABIArgInfo, used in X86.
static ABIArgInfo getDirectX86Hva(llvm::Type* T = nullptr) {
  auto AI = ABIArgInfo::getDirect(T);
  AI.setInReg(true);
  AI.setCanBeFlattened(false);
  return AI;
}

//===----------------------------------------------------------------------===//
// X86-32 ABI Implementation
//===----------------------------------------------------------------------===//

/// \brief Similar to llvm::CCState, but for Clang.
struct CCState {
  CCState(unsigned CC) : CC(CC), FreeRegs(0), FreeSSERegs(0) {}

  unsigned CC;
  unsigned FreeRegs;
  unsigned FreeSSERegs;
};

enum {
  // Vectorcall only allows the first 6 parameters to be passed in registers.
  VectorcallMaxParamNumAsReg = 6
};

/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public SwiftABIInfo {
  enum Class {
    Integer,
    Float
  };

  static const unsigned MinABIStackAlignInBytes = 4;

  bool IsDarwinVectorABI;
  bool IsRetSmallStructInRegABI;
  bool IsWin32StructABI;
  bool IsSoftFloatABI;
  bool IsMCUABI;
  unsigned DefaultNumRegisterParameters;

  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }

  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorTypeForVectorCall(getContext(), Ty);
  }

  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t NumMembers) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorCallAggregateSmallEnough(NumMembers);
  }

  bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context) const;

  /// getIndirectResult - Give a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;

  ABIArgInfo getIndirectReturnResult(QualType Ty, CCState &State) const;

  /// \brief Return the alignment to use for the given type on the stack.
  unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;

  Class classify(QualType Ty) const;
  ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const;
  ABIArgInfo reclassifyHvaArgType(QualType RetTy, CCState &State,
                                  const ABIArgInfo& current) const;
  /// \brief Updates the number of available free registers, returns
  /// true if any registers were allocated.
  bool updateFreeRegs(QualType Ty, CCState &State) const;

  bool shouldAggregateUseDirect(QualType Ty, CCState &State, bool &InReg,
                                bool &NeedsPadding) const;
  bool shouldPrimitiveUseInReg(QualType Ty, CCState &State) const;

  bool canExpandIndirectArgument(QualType Ty) const;

  /// \brief Rewrite the function info so that all memory arguments use
  /// inalloca.
  void rewriteWithInAlloca(CGFunctionInfo &FI) const;

  void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
                           CharUnits &StackOffset, ABIArgInfo &Info,
                           QualType Type) const;
  void computeVectorCallArgs(CGFunctionInfo &FI, CCState &State,
                             bool &UsedInAlloca) const;

public:
  void computeInfo(CGFunctionInfo &FI) const override;
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
                bool RetSmallStructInRegABI, bool Win32StructABI,
                unsigned NumRegisterParameters, bool SoftFloatABI)
      : SwiftABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
        IsRetSmallStructInRegABI(RetSmallStructInRegABI),
        IsWin32StructABI(Win32StructABI),
        IsSoftFloatABI(SoftFloatABI),
        IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()),
        DefaultNumRegisterParameters(NumRegisterParameters) {}

  bool shouldPassIndirectlyForSwift(CharUnits totalSize,
                                    ArrayRef<llvm::Type*> scalars,
                                    bool asReturnValue) const override {
    // LLVM's x86-32 lowering currently only assigns up to three
    // integer registers and three fp registers. Oddly, it'll use up to
    // four vector registers for vectors, but those can overlap with the
    // scalar registers.
    return occupiesMoreThan(CGT, scalars, /*total*/ 3);
  }

  bool isSwiftErrorInRegister() const override {
    // x86-32 lowering does not support passing swifterror in a register.
    return false;
  }
};

class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
                          bool RetSmallStructInRegABI, bool Win32StructABI,
                          unsigned NumRegisterParameters, bool SoftFloatABI)
      : TargetCodeGenInfo(new X86_32ABIInfo(
            CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
            NumRegisterParameters, SoftFloatABI)) {}

  static bool isStructReturnInRegABI(
      const llvm::Triple &Triple, const CodeGenOptions &Opts);

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    // Darwin uses different dwarf register numbers for EH.
    if (CGM.getTarget().getTriple().isOSDarwin()) return 5;
    return 4;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  void addReturnRegisterOutputs(CodeGenFunction &CGF, LValue ReturnValue,
                                std::string &Constraints,
                                std::vector<llvm::Type *> &ResultRegTypes,
                                std::vector<llvm::Type *> &ResultTruncRegTypes,
                                std::vector<LValue> &ResultRegDests,
                                std::string &AsmString,
                                unsigned NumOutputs) const override;

  llvm::Constant *
  getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
    unsigned Sig = (0xeb << 0) | // jmp rel8
                   (0x06 << 8) | // .+0x08
                   ('F' << 16) |
                   ('T' << 24);
    return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
  }

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "movl\t%ebp, %ebp"
           "\t\t## marker for objc_retainAutoreleaseReturnValue";
  }
};

/// Rewrite input constraint references after adding some output constraints.
/// In the case where there is one output and one input and we add one output,
/// we need to replace all operand references greater than or equal to 1:
///     map $0 -> $0, $1 -> $2
/// The result will be:
///     $0 (original output), $1 (added output), $2 (original input)
static void rewriteInputConstraintReferences(unsigned FirstIn,
                                             unsigned NumNewOuts,
                                             std::string &AsmString) {
  std::string Buf;
  llvm::raw_string_ostream OS(Buf);
  size_t Pos = 0;
  while (Pos < AsmString.size()) {
    size_t DollarStart = AsmString.find('$', Pos);
    if (DollarStart == std::string::npos)
      DollarStart = AsmString.size();
    size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart);
    if (DollarEnd == std::string::npos)
      DollarEnd = AsmString.size();
    OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
    Pos = DollarEnd;
    size_t NumDollars = DollarEnd - DollarStart;
    if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
      // We have an operand reference.
      size_t DigitStart = Pos;
      size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart);
      if (DigitEnd == std::string::npos)
        DigitEnd = AsmString.size();
      StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
      unsigned OperandIndex;
      if (!OperandStr.getAsInteger(10, OperandIndex)) {
        if (OperandIndex >= FirstIn)
          OperandIndex += NumNewOuts;
        OS << OperandIndex;
      } else {
        OS << OperandStr;
      }
      Pos = DigitEnd;
    }
  }
  AsmString = std::move(OS.str());
}

/// Add output constraints for EAX:EDX because they are return registers.
void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
    CodeGenFunction &CGF, LValue ReturnSlot, std::string &Constraints,
    std::vector<llvm::Type *> &ResultRegTypes,
    std::vector<llvm::Type *> &ResultTruncRegTypes,
    std::vector<LValue> &ResultRegDests, std::string &AsmString,
    unsigned NumOutputs) const {
  uint64_t RetWidth = CGF.getContext().getTypeSize(ReturnSlot.getType());

  // Use the EAX constraint if the width is 32 or smaller and EAX:EDX if it is
  // larger.
  if (!Constraints.empty())
    Constraints += ',';
  if (RetWidth <= 32) {
    Constraints += "={eax}";
    ResultRegTypes.push_back(CGF.Int32Ty);
  } else {
    // Use the 'A' constraint for EAX:EDX.
    Constraints += "=A";
    ResultRegTypes.push_back(CGF.Int64Ty);
  }

  // Truncate EAX or EAX:EDX to an integer of the appropriate size.
  llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth);
  ResultTruncRegTypes.push_back(CoerceTy);

  // Coerce the integer by bitcasting the return slot pointer.
  ReturnSlot.setAddress(CGF.Builder.CreateBitCast(ReturnSlot.getAddress(),
                                                  CoerceTy->getPointerTo()));
  ResultRegDests.push_back(ReturnSlot);

  rewriteInputConstraintReferences(NumOutputs, 1, AsmString);
}

/// shouldReturnTypeInRegister - Determine if the given type should be
/// returned in a register (for the Darwin and MCU ABI).
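///
/// For example, on i386 a 32-bit struct of two shorts can be returned in a
/// register, but a 40-bit struct of five chars cannot, since 40 bits is not
/// a register size.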
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
                                               ASTContext &Context) const {
  uint64_t Size = Context.getTypeSize(Ty);

  // For i386, the type must be register sized.
  // For the MCU ABI, it only needs to be <= 8 bytes.
  if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))
    return false;

  if (Ty->isVectorType()) {
    // 64- and 128- bit vectors inside structures are not returned in
    // registers.
    if (Size == 64 || Size == 128)
      return false;

    return true;
  }

  // If this is a builtin, pointer, enum, complex type, member pointer, or
  // member function pointer it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
      Ty->isAnyComplexType() || Ty->isEnumeralType() ||
      Ty->isBlockPointerType() || Ty->isMemberPointerType())
    return true;

  // Arrays are treated like records.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
    return shouldReturnTypeInRegister(AT->getElementType(), Context);

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // FIXME: Traverse bases here too.

  // Structure types are passed in register if all fields would be
  // passed in a register.
  for (const auto *FD : RT->getDecl()->fields()) {
    // Empty fields are ignored.
    if (isEmptyField(Context, FD, true))
      continue;

    // Check fields recursively.
    if (!shouldReturnTypeInRegister(FD->getType(), Context))
      return false;
  }
  return true;
}

static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
  // Treat complex types as the element type.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
    Ty = CTy->getElementType();

  // Check for a type which we know has a simple scalar argument-passing
  // convention without any padding. (We're specifically looking for 32
  // and 64-bit integer and integer-equivalents, float, and double.)
  if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
      !Ty->isEnumeralType() && !Ty->isBlockPointerType())
    return false;

  uint64_t Size = Context.getTypeSize(Ty);
  return Size == 32 || Size == 64;
}

static bool addFieldSizes(ASTContext &Context, const RecordDecl *RD,
                          uint64_t &Size) {
  for (const auto *FD : RD->fields()) {
    // Scalar arguments on the stack get 4 byte alignment on x86. If the
    // argument is smaller than 32-bits, expanding the struct will create
    // alignment padding.
    if (!is32Or64BitBasicType(FD->getType(), Context))
      return false;

    // FIXME: Reject bit-fields wholesale; there are two problems, we don't know
    // how to expand them yet, and the predicate for telling if a bitfield still
    // counts as "basic" is more complicated than what we were doing previously.
    if (FD->isBitField())
      return false;

    Size += Context.getTypeSize(FD->getType());
  }
  return true;
}

static bool addBaseAndFieldSizes(ASTContext &Context, const CXXRecordDecl *RD,
                                 uint64_t &Size) {
  // Don't do this if there are any non-empty bases.
  for (const CXXBaseSpecifier &Base : RD->bases()) {
    if (!addBaseAndFieldSizes(Context, Base.getType()->getAsCXXRecordDecl(),
                              Size))
      return false;
  }
  if (!addFieldSizes(Context, RD, Size))
    return false;
  return true;
}

/// Test whether an argument type which is to be passed indirectly (on the
/// stack) would have the equivalent layout if it was expanded into separate
/// arguments. If so, we prefer to do the latter to avoid inhibiting
/// optimizations.
bool X86_32ABIInfo::canExpandIndirectArgument(QualType Ty) const {
  // We can only expand structure types.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  uint64_t Size = 0;
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    if (!IsWin32StructABI) {
      // On non-Windows, we have to conservatively match our old bitcode
      // prototypes in order to be ABI-compatible at the bitcode level.
      if (!CXXRD->isCLike())
        return false;
    } else {
      // Don't do this for dynamic classes.
      if (CXXRD->isDynamicClass())
        return false;
    }
    if (!addBaseAndFieldSizes(getContext(), CXXRD, Size))
      return false;
  } else {
    if (!addFieldSizes(getContext(), RD, Size))
      return false;
  }

  // We can do this if there was no alignment padding.
  return Size == getContext().getTypeSize(Ty);
}

ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(QualType RetTy,
                                                  CCState &State) const {
  // If the return value is indirect, then the hidden argument is consuming one
  // integer register.
  if (State.FreeRegs) {
    --State.FreeRegs;
    if (!IsMCUABI)
      return getNaturalAlignIndirectInReg(RetTy);
  }
  return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
}

ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
                                             CCState &State) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  if ((State.CC == llvm::CallingConv::X86_VectorCall ||
       State.CC == llvm::CallingConv::X86_RegCall) &&
      isHomogeneousAggregate(RetTy, Base, NumElts)) {
    // The LLVM struct type for such an aggregate should lower properly.
    return ABIArgInfo::getDirect();
  }

  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // On Darwin, some vectors are returned in registers.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // 128-bit vectors are a special case; they are returned in
      // registers and we need to make sure to pick a type the LLVM
      // backend will like.
      if (Size == 128)
        return ABIArgInfo::getDirect(llvm::VectorType::get(
                  llvm::Type::getInt64Ty(getVMContext()), 2));

      // Always return in register if it fits in a general purpose
      // register, or if it is 64 bits and has a single element.
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));

      return getIndirectReturnResult(RetTy, State);
    }

    return ABIArgInfo::getDirect();
  }

  if (isAggregateTypeForABI(RetTy)) {
    if (const RecordType *RT = RetTy->getAs<RecordType>()) {
      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectReturnResult(RetTy, State);
    }

    // If specified, structs and unions are always indirect.
    if (!IsRetSmallStructInRegABI && !RetTy->isAnyComplexType())
      return getIndirectReturnResult(RetTy, State);

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), RetTy, true))
      return ABIArgInfo::getIgnore();

    // Small structures which are register sized are generally returned
    // in a register.
    if (shouldReturnTypeInRegister(RetTy, getContext())) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // As a special-case, if the struct is a "single-element" struct, and
      // the field is of type "float" or "double", return it in a
      // floating-point register. (MSVC does not apply this special case.)
      // We apply a similar transformation for pointer types to improve the
      // quality of the generated IR.
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
            || SeltTy->hasPointerRepresentation())
          return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

      // FIXME: We should be able to narrow this integer in cases with dead
      // padding.
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size));
    }

    return getIndirectReturnResult(RetTy, State);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

static bool isSSEVectorType(ASTContext &Context, QualType Ty) {
  return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
}

static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())
      if (!isRecordWithSSEVectorType(Context, I.getType()))
        return false;

  for (const auto *i : RD->fields()) {
    QualType FT = i->getType();

    if (isSSEVectorType(Context, FT))
      return true;

    if (isRecordWithSSEVectorType(Context, FT))
      return true;
  }

  return false;
}

unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
                                                 unsigned Align) const {
  // If the alignment is less than or equal to the minimum ABI alignment,
  // just use the default; the backend will handle this.
  if (Align <= MinABIStackAlignInBytes)
    return 0; // Use default alignment.

  // On non-Darwin, the stack type alignment is always 4.
  if (!IsDarwinVectorABI) {
    // Set explicit alignment, since we may need to realign the top.
    return MinABIStackAlignInBytes;
  }

  // Otherwise, if the type contains an SSE vector type, the alignment is 16.
  if (Align >= 16 && (isSSEVectorType(getContext(), Ty) ||
                      isRecordWithSSEVectorType(getContext(), Ty)))
    return 16;

  return MinABIStackAlignInBytes;
}

ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
                                            CCState &State) const {
  if (!ByVal) {
    if (State.FreeRegs) {
      --State.FreeRegs; // Non-byval indirects just use one pointer.
      if (!IsMCUABI)
        return getNaturalAlignIndirectInReg(Ty);
    }
    return getNaturalAlignIndirect(Ty, false);
  }

  // Compute the byval alignment.
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
  if (StackAlign == 0)
    return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true);

  // If the stack alignment is less than the type alignment, realign the
  // argument.
  bool Realign = TypeAlign > StackAlign;
  return ABIArgInfo::getIndirect(CharUnits::fromQuantity(StackAlign),
                                 /*ByVal=*/true, Realign);
}

X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
  const Type *T = isSingleElementStruct(Ty, getContext());
  if (!T)
    T = Ty.getTypePtr();

  if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
    BuiltinType::Kind K = BT->getKind();
    if (K == BuiltinType::Float || K == BuiltinType::Double)
      return Float;
  }
  return Integer;
}

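// updateFreeRegs - Try to consume the 32-bit register slots needed to pass
// Ty in registers; e.g. a 12-byte struct needs (96 + 31) / 32 = 3 slots.
// Returns false if Ty classifies as Float (on hard-float ABIs) or does not
// fit in the remaining registers.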
bool X86_32ABIInfo::updateFreeRegs(QualType Ty, CCState &State) const {
  if (!IsSoftFloatABI) {
    Class C = classify(Ty);
    if (C == Float)
      return false;
  }

  unsigned Size = getContext().getTypeSize(Ty);
  unsigned SizeInRegs = (Size + 31) / 32;

  if (SizeInRegs == 0)
    return false;

  if (!IsMCUABI) {
    if (SizeInRegs > State.FreeRegs) {
      State.FreeRegs = 0;
      return false;
    }
  } else {
    // The MCU psABI allows passing parameters in-reg even if there are
    // earlier parameters that are passed on the stack. Also,
    // it does not allow passing >8-byte structs in-register,
    // even if there are 3 free registers available.
    if (SizeInRegs > State.FreeRegs || SizeInRegs > 2)
      return false;
  }

  State.FreeRegs -= SizeInRegs;
  return true;
}

bool X86_32ABIInfo::shouldAggregateUseDirect(QualType Ty, CCState &State,
                                             bool &InReg,
                                             bool &NeedsPadding) const {
  // On Windows, aggregates other than HFAs are never passed in registers, and
  // they do not consume register slots. Homogenous floating-point aggregates
  // (HFAs) have already been dealt with at this point.
  if (IsWin32StructABI && isAggregateTypeForABI(Ty))
    return false;

  NeedsPadding = false;
  InReg = !IsMCUABI;

  if (!updateFreeRegs(Ty, State))
    return false;

  if (IsMCUABI)
    return true;

  if (State.CC == llvm::CallingConv::X86_FastCall ||
      State.CC == llvm::CallingConv::X86_VectorCall ||
      State.CC == llvm::CallingConv::X86_RegCall) {
    if (getContext().getTypeSize(Ty) <= 32 && State.FreeRegs)
      NeedsPadding = true;

    return false;
  }

  return true;
}

bool X86_32ABIInfo::shouldPrimitiveUseInReg(QualType Ty, CCState &State) const {
  if (!updateFreeRegs(Ty, State))
    return false;

  if (IsMCUABI)
    return false;

  if (State.CC == llvm::CallingConv::X86_FastCall ||
      State.CC == llvm::CallingConv::X86_VectorCall ||
      State.CC == llvm::CallingConv::X86_RegCall) {
    if (getContext().getTypeSize(Ty) > 32)
      return false;

    return (Ty->isIntegralOrEnumerationType() || Ty->isPointerType() ||
            Ty->isReferenceType());
  }

  return true;
}

ABIArgInfo
X86_32ABIInfo::reclassifyHvaArgType(QualType Ty, CCState &State,
                                    const ABIArgInfo &current) const {
  // Assumes vectorCall calling convention.
  const Type *Base = nullptr;
  uint64_t NumElts = 0;

  if (!Ty->isBuiltinType() && !Ty->isVectorType() &&
      isHomogeneousAggregate(Ty, Base, NumElts)) {
    if (State.FreeSSERegs >= NumElts) {
      // HVA types get passed directly in registers if there is room.
      State.FreeSSERegs -= NumElts;
      return getDirectX86Hva();
    }
    // If there's no room, the HVA gets passed as a normal indirect
    // structure.
    return getIndirectResult(Ty, /*ByVal=*/false, State);
  }
  return current;
}

ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
                                               CCState &State) const {
  // FIXME: Set alignment on indirect arguments.

  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Check with the C++ ABI first.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (RT) {
    CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
    if (RAA == CGCXXABI::RAA_Indirect) {
      return getIndirectResult(Ty, false, State);
    } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
      // The field index doesn't matter, we'll fix it up later.
      return ABIArgInfo::getInAlloca(/*FieldIndex=*/0);
    }
  }

  // vectorcall adds the concept of a homogenous vector aggregate, similar
  // to other targets; regcall uses some of the HVA rules.
  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  if ((State.CC == llvm::CallingConv::X86_VectorCall ||
       State.CC == llvm::CallingConv::X86_RegCall) &&
      isHomogeneousAggregate(Ty, Base, NumElts)) {
    if (State.CC == llvm::CallingConv::X86_RegCall) {
      if (State.FreeSSERegs >= NumElts) {
        State.FreeSSERegs -= NumElts;
        if (Ty->isBuiltinType() || Ty->isVectorType())
          return ABIArgInfo::getDirect();
        return ABIArgInfo::getExpand();
      }
      return getIndirectResult(Ty, /*ByVal=*/false, State);
    } else if (State.CC == llvm::CallingConv::X86_VectorCall) {
      if (State.FreeSSERegs >= NumElts &&
          (Ty->isBuiltinType() || Ty->isVectorType())) {
        // Actual floating-point types get registers first time through if
        // there are registers available.
        State.FreeSSERegs -= NumElts;
        return ABIArgInfo::getDirect();
      } else if (!Ty->isBuiltinType() && !Ty->isVectorType()) {
        // HVA types only get registers after everything else has been
        // set, so they are classified as indirect for now.
        return ABIArgInfo::getIndirect(getContext().getTypeAlignInChars(Ty));
      }
    }
  }

  if (isAggregateTypeForABI(Ty)) {
    // Structures with flexible arrays are always indirect.
    // FIXME: This should not be byval!
    if (RT && RT->getDecl()->hasFlexibleArrayMember())
      return getIndirectResult(Ty, true, State);

    // Ignore empty structs/unions on non-Windows.
    if (!IsWin32StructABI && isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();

    llvm::LLVMContext &LLVMContext = getVMContext();
    llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
    bool NeedsPadding = false;
    bool InReg;
    if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
      unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
      SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32);
      llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
      if (InReg)
        return ABIArgInfo::getDirectInReg(Result);
      else
        return ABIArgInfo::getDirect(Result);
    }
    llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;

    // Expand small (<= 128-bit) record types when we know that the stack layout
    // of those arguments will match the struct. This is important because the
    // LLVM backend isn't smart enough to remove byval, which inhibits many
    // optimizations.
    // Don't do this for the MCU if there are still free integer registers
    // (see X86_64 ABI for full explanation).
    if (getContext().getTypeSize(Ty) <= 4 * 32 &&
        (!IsMCUABI || State.FreeRegs == 0) && canExpandIndirectArgument(Ty))
      return ABIArgInfo::getExpandWithPadding(
          State.CC == llvm::CallingConv::X86_FastCall ||
              State.CC == llvm::CallingConv::X86_VectorCall ||
              State.CC == llvm::CallingConv::X86_RegCall,
          PaddingType);

    return getIndirectResult(Ty, true, State);
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // On Darwin, some vectors are passed in memory, we handle this by passing
    // it as an i8/i16/i32/i64.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(Ty);
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));
    }

    if (IsX86_MMXType(CGT.ConvertType(Ty)))
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64));

    return ABIArgInfo::getDirect();
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  bool InReg = shouldPrimitiveUseInReg(Ty, State);

  if (Ty->isPromotableIntegerType()) {
    if (InReg)
      return ABIArgInfo::getExtendInReg();
    return ABIArgInfo::getExtend();
  }

  if (InReg)
    return ABIArgInfo::getDirectInReg();
  return ABIArgInfo::getDirect();
}

void X86_32ABIInfo::computeVectorCallArgs(CGFunctionInfo &FI, CCState &State,
                                          bool &UsedInAlloca) const {
  // Vectorcall only allows the first 6 parameters to be passed in registers,
  // and homogeneous vector aggregates are only put into registers as a second
  // priority.
  unsigned Count = 0;
  CCState ZeroState = State;
  ZeroState.FreeRegs = ZeroState.FreeSSERegs = 0;
  // HVAs must be done as a second priority for registers, so the deferred
  // items are dealt with by going through the pattern a second time.
  for (auto &I : FI.arguments()) {
    if (Count < VectorcallMaxParamNumAsReg)
      I.info = classifyArgumentType(I.type, State);
    else
      // Parameters after the 6th cannot be passed in registers,
      // so pretend there are no registers left for them.
      I.info = classifyArgumentType(I.type, ZeroState);
    UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca);
    ++Count;
  }
  Count = 0;
  // Go through the arguments a second time to get HVAs registers if there
  // are still some available.
  for (auto &I : FI.arguments()) {
    if (Count < VectorcallMaxParamNumAsReg)
      I.info = reclassifyHvaArgType(I.type, State, I.info);
    ++Count;
  }
}

1706 void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
1707 CCState State(FI.getCallingConvention());
1710 else if (State.CC == llvm::CallingConv::X86_FastCall)
1712 else if (State.CC == llvm::CallingConv::X86_VectorCall) {
1714 State.FreeSSERegs = 6;
1715 } else if (FI.getHasRegParm())
1716 State.FreeRegs = FI.getRegParm();
1717 else if (State.CC == llvm::CallingConv::X86_RegCall) {
1719 State.FreeSSERegs = 8;
1721 State.FreeRegs = DefaultNumRegisterParameters;
1723 if (!getCXXABI().classifyReturnType(FI)) {
1724 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), State);
1725 } else if (FI.getReturnInfo().isIndirect()) {
1726 // The C++ ABI is not aware of register usage, so we have to check if the
1727 // return value was sret and put it in a register ourselves if appropriate.
1728 if (State.FreeRegs) {
1729 --State.FreeRegs; // The sret parameter consumes a register.
1730 if (!IsMCUABI)
1731 FI.getReturnInfo().setInReg(true);
1732 }
1733 }
1735 // The chain argument effectively gives us another free register.
1736 if (FI.isChainCall())
1737 ++State.FreeRegs;
1739 bool UsedInAlloca = false;
1740 if (State.CC == llvm::CallingConv::X86_VectorCall) {
1741 computeVectorCallArgs(FI, State, UsedInAlloca);
1742 } else {
1743 // If not vectorcall, revert to normal behavior.
1744 for (auto &I : FI.arguments()) {
1745 I.info = classifyArgumentType(I.type, State);
1746 UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca);
1747 }
1748 }
1750 // If we needed to use inalloca for any argument, do a second pass and rewrite
1751 // all the memory arguments to use inalloca.
1752 if (UsedInAlloca)
1753 rewriteWithInAlloca(FI);
1754 }
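// Editor's note -- an illustrative example, not part of the original file:
// under __fastcall the CCState above starts with FreeRegs = 2, so for
//   int __fastcall f(int a, int b, int c);
// 'a' and 'b' are classified inreg (ECX, EDX) while 'c' is passed on the
// stack.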
1756 void
1757 X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
1758 CharUnits &StackOffset, ABIArgInfo &Info,
1759 QualType Type) const {
1760 // Arguments are always 4-byte-aligned.
1761 CharUnits FieldAlign = CharUnits::fromQuantity(4);
1763 assert(StackOffset.isMultipleOf(FieldAlign) && "unaligned inalloca struct");
1764 Info = ABIArgInfo::getInAlloca(FrameFields.size());
1765 FrameFields.push_back(CGT.ConvertTypeForMem(Type));
1766 StackOffset += getContext().getTypeSizeInChars(Type);
1768 // Insert padding bytes to respect alignment.
1769 CharUnits FieldEnd = StackOffset;
1770 StackOffset = FieldEnd.alignTo(FieldAlign);
1771 if (StackOffset != FieldEnd) {
1772 CharUnits NumBytes = StackOffset - FieldEnd;
1773 llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
1774 Ty = llvm::ArrayType::get(Ty, NumBytes.getQuantity());
1775 FrameFields.push_back(Ty);
1776 }
1777 }
1779 static bool isArgInAlloca(const ABIArgInfo &Info) {
1780 // Leave ignored and inreg arguments alone.
1781 switch (Info.getKind()) {
1782 case ABIArgInfo::InAlloca:
1783 return true;
1784 case ABIArgInfo::Indirect:
1785 assert(Info.getIndirectByVal());
1786 return true;
1787 case ABIArgInfo::Ignore:
1788 return false;
1789 case ABIArgInfo::Direct:
1790 case ABIArgInfo::Extend:
1791 if (Info.getInReg())
1792 return false;
1793 return true;
1794 case ABIArgInfo::Expand:
1795 case ABIArgInfo::CoerceAndExpand:
1796 // These are aggregate types which are never passed in registers when
1797 // inalloca is involved.
1798 return true;
1799 }
1800 llvm_unreachable("invalid enum");
1801 }
1803 void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
1804 assert(IsWin32StructABI && "inalloca only supported on win32");
1806 // Build a packed struct type for all of the arguments in memory.
1807 SmallVector<llvm::Type *, 6> FrameFields;
1809 // The stack alignment is always 4.
1810 CharUnits StackAlign = CharUnits::fromQuantity(4);
1812 CharUnits StackOffset;
1813 CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end();
1815 // Put 'this' into the struct before 'sret', if necessary.
1816 bool IsThisCall =
1817 FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall;
1818 ABIArgInfo &Ret = FI.getReturnInfo();
1819 if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall &&
1820 isArgInAlloca(I->info)) {
1821 addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
1822 ++I;
1823 }
1825 // Put the sret parameter into the inalloca struct if it's in memory.
1826 if (Ret.isIndirect() && !Ret.getInReg()) {
1827 CanQualType PtrTy = getContext().getPointerType(FI.getReturnType());
1828 addFieldToArgStruct(FrameFields, StackOffset, Ret, PtrTy);
1829 // On Windows, the hidden sret parameter is always returned in eax.
1830 Ret.setInAllocaSRet(IsWin32StructABI);
1831 }
1833 // Skip the 'this' parameter in ecx.
1834 if (IsThisCall)
1835 ++I;
1837 // Put arguments passed in memory into the struct.
1838 for (; I != E; ++I) {
1839 if (isArgInAlloca(I->info))
1840 addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
1841 }
1843 FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
1844 /*isPacked=*/true),
1845 StackAlign);
1846 }
1848 Address X86_32ABIInfo::EmitVAArg(CodeGenFunction &CGF,
1849 Address VAListAddr, QualType Ty) const {
1851 auto TypeInfo = getContext().getTypeInfoInChars(Ty);
1853 // x86-32 changes the alignment of certain arguments on the stack.
1855 // Just messing with TypeInfo like this works because we never pass
1856 // anything indirectly.
1857 TypeInfo.second = CharUnits::fromQuantity(
1858 getTypeStackAlignInBytes(Ty, TypeInfo.second.getQuantity()));
1860 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
1861 TypeInfo, CharUnits::fromQuantity(4),
1862 /*AllowHigherAlign*/ true);
1863 }
1865 bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
1866 const llvm::Triple &Triple, const CodeGenOptions &Opts) {
1867 assert(Triple.getArch() == llvm::Triple::x86);
1869 switch (Opts.getStructReturnConvention()) {
1870 case CodeGenOptions::SRCK_Default:
1871 break;
1872 case CodeGenOptions::SRCK_OnStack: // -fpcc-struct-return
1873 return false;
1874 case CodeGenOptions::SRCK_InRegs: // -freg-struct-return
1875 return true;
1876 }
1878 if (Triple.isOSDarwin() || Triple.isOSIAMCU())
1879 return true;
1881 switch (Triple.getOS()) {
1882 case llvm::Triple::DragonFly:
1883 case llvm::Triple::FreeBSD:
1884 case llvm::Triple::OpenBSD:
1885 case llvm::Triple::Bitrig:
1886 case llvm::Triple::Win32:
1887 return true;
1888 default:
1889 return false;
1890 }
1891 }
1893 void X86_32TargetCodeGenInfo::setTargetAttributes(const Decl *D,
1894 llvm::GlobalValue *GV,
1895 CodeGen::CodeGenModule &CGM) const {
1896 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
1897 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
1898 // Get the LLVM function.
1899 llvm::Function *Fn = cast<llvm::Function>(GV);
1901 // Now add the 'alignstack' attribute with a value of 16.
1902 llvm::AttrBuilder B;
1903 B.addStackAlignmentAttr(16);
1904 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
1905 }
1906 if (FD->hasAttr<AnyX86InterruptAttr>()) {
1907 llvm::Function *Fn = cast<llvm::Function>(GV);
1908 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
1909 }
1910 }
1911 }
1913 bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
1914 CodeGen::CodeGenFunction &CGF,
1915 llvm::Value *Address) const {
1916 CodeGen::CGBuilderTy &Builder = CGF.Builder;
1918 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
1920 // 0-7 are the eight integer registers; the order is different
1921 // on Darwin (for EH), but the range is the same.
1922 // 8 is %eip.
1923 AssignToArrayRange(Builder, Address, Four8, 0, 8);
1925 if (CGF.CGM.getTarget().getTriple().isOSDarwin()) {
1926 // 12-16 are st(0..4). Not sure why we stop at 4.
1927 // These have size 16, which is sizeof(long double) on
1928 // platforms with 8-byte alignment for that type.
1929 llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
1930 AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);
1932 } else {
1933 // 9 is %eflags, which doesn't get a size on Darwin for some
1934 // reason.
1935 Builder.CreateAlignedStore(
1936 Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9),
1937 CharUnits::One());
1939 // 11-16 are st(0..5). Not sure why we stop at 5.
1940 // These have size 12, which is sizeof(long double) on
1941 // platforms with 4-byte alignment for that type.
1942 llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
1943 AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
1944 }
1946 return false;
1947 }
1949 //===----------------------------------------------------------------------===//
1950 // X86-64 ABI Implementation
1951 //===----------------------------------------------------------------------===//
1954 namespace {
1955 /// The AVX ABI level for X86 targets.
1956 enum class X86AVXABILevel {
1957 None,
1958 AVX,
1959 AVX512
1960 };
1962 /// \p returns the size in bits of the largest (native) vector for \p AVXLevel.
1963 static unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) {
1964 switch (AVXLevel) {
1965 case X86AVXABILevel::AVX512:
1966 return 512;
1967 case X86AVXABILevel::AVX:
1968 return 256;
1969 case X86AVXABILevel::None:
1970 return 128;
1971 }
1972 llvm_unreachable("Unknown AVXLevel");
1973 }
1975 /// X86_64ABIInfo - The X86_64 ABI information.
1976 class X86_64ABIInfo : public SwiftABIInfo {
1977 enum Class {
1978 Integer = 0,
1979 SSE,
1980 SSEUp,
1981 X87,
1982 X87Up,
1983 ComplexX87,
1984 NoClass,
1985 Memory
1986 };
1988 /// merge - Implement the X86_64 ABI merging algorithm.
1990 /// Merge an accumulating classification \arg Accum with a field
1991 /// classification \arg Field.
1993 /// \param Accum - The accumulating classification. This should
1994 /// always be either NoClass or the result of a previous merge
1995 /// call. In addition, this should never be Memory (the caller
1996 /// should just return Memory for the aggregate).
1997 static Class merge(Class Accum, Class Field);
1999 /// postMerge - Implement the X86_64 ABI post merging algorithm.
2001 /// Post merger cleanup, reduces a malformed Hi and Lo pair to
2002 /// final MEMORY or SSE classes when necessary.
2004 /// \param AggregateSize - The size of the current aggregate in
2005 /// the classification process.
2007 /// \param Lo - The classification for the parts of the type
2008 /// residing in the low word of the containing object.
2010 /// \param Hi - The classification for the parts of the type
2011 /// residing in the higher words of the containing object.
2013 void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;
2015 /// classify - Determine the x86_64 register classes in which the
2016 /// given type T should be passed.
2018 /// \param Lo - The classification for the parts of the type
2019 /// residing in the low word of the containing object.
2021 /// \param Hi - The classification for the parts of the type
2022 /// residing in the high word of the containing object.
2024 /// \param OffsetBase - The bit offset of this type in the
2025 /// containing object. Some parameters are classified differently
2026 /// depending on whether they straddle an eightbyte boundary.
2028 /// \param isNamedArg - Whether the argument in question is a "named"
2029 /// argument, as used in AMD64-ABI 3.5.7.
2031 /// If a word is unused its result will be NoClass; if a type should
2032 /// be passed in Memory then at least the classification of \arg Lo
2033 /// will be Memory.
2035 /// The \arg Lo class will be NoClass iff the argument is ignored.
2037 /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
2038 /// also be ComplexX87.
2039 void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
2040 bool isNamedArg) const;
2042 llvm::Type *GetByteVectorType(QualType Ty) const;
2043 llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
2044 unsigned IROffset, QualType SourceTy,
2045 unsigned SourceOffset) const;
2046 llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
2047 unsigned IROffset, QualType SourceTy,
2048 unsigned SourceOffset) const;
2050 /// getIndirectReturnResult - Given a source type \arg Ty, return a suitable
2051 /// result such that the value will be returned in memory.
2052 ABIArgInfo getIndirectReturnResult(QualType Ty) const;
2054 /// getIndirectResult - Given a source type \arg Ty, return a suitable result
2055 /// such that the argument will be passed in memory.
2057 /// \param freeIntRegs - The number of free integer registers remaining
2058 /// available.
2059 ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;
2061 ABIArgInfo classifyReturnType(QualType RetTy) const;
2063 ABIArgInfo classifyArgumentType(QualType Ty, unsigned freeIntRegs,
2064 unsigned &neededInt, unsigned &neededSSE,
2065 bool isNamedArg) const;
2067 ABIArgInfo classifyRegCallStructType(QualType Ty, unsigned &NeededInt,
2068 unsigned &NeededSSE) const;
2070 ABIArgInfo classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
2071 unsigned &NeededSSE) const;
2073 bool IsIllegalVectorType(QualType Ty) const;
2075 /// The 0.98 ABI revision clarified a lot of ambiguities,
2076 /// unfortunately in ways that were not always consistent with
2077 /// certain previous compilers. In particular, platforms which
2078 /// required strict binary compatibility with older versions of GCC
2079 /// may need to exempt themselves.
2080 bool honorsRevision0_98() const {
2081 return !getTarget().getTriple().isOSDarwin();
2084 /// GCC classifies <1 x long long> as SSE, but compatibility with older clang
2085 /// compilers requires us to classify it as INTEGER.
2086 bool classifyIntegerMMXAsSSE() const {
2087 const llvm::Triple &Triple = getTarget().getTriple();
2088 if (Triple.isOSDarwin() || Triple.getOS() == llvm::Triple::PS4)
2089 return false;
2090 if (Triple.isOSFreeBSD() && Triple.getOSMajorVersion() >= 10)
2091 return false;
2092 return true;
2093 }
2095 X86AVXABILevel AVXLevel;
2096 // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on
2097 // 64-bit hardware.
2098 bool Has64BitPointers;
2100 public:
2101 X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel) :
2102 SwiftABIInfo(CGT), AVXLevel(AVXLevel),
2103 Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
2104 }
2106 bool isPassedUsingAVXType(QualType type) const {
2107 unsigned neededInt, neededSSE;
2108 // The freeIntRegs argument doesn't matter here.
2109 ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE,
2110 /*isNamedArg*/true);
2111 if (info.isDirect()) {
2112 llvm::Type *ty = info.getCoerceToType();
2113 if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
2114 return (vectorTy->getBitWidth() > 128);
2115 }
2116 return false;
2117 }
2119 void computeInfo(CGFunctionInfo &FI) const override;
2121 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
2122 QualType Ty) const override;
2123 Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
2124 QualType Ty) const override;
2126 bool has64BitPointers() const {
2127 return Has64BitPointers;
2128 }
2130 bool shouldPassIndirectlyForSwift(CharUnits totalSize,
2131 ArrayRef<llvm::Type*> scalars,
2132 bool asReturnValue) const override {
2133 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
2134 }
2135 bool isSwiftErrorInRegister() const override {
2136 return true;
2137 }
2138 };
2140 /// WinX86_64ABIInfo - The Windows X86_64 ABI information.
2141 class WinX86_64ABIInfo : public SwiftABIInfo {
2142 public:
2143 WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT)
2144 : SwiftABIInfo(CGT),
2145 IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {}
2147 void computeInfo(CGFunctionInfo &FI) const override;
2149 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
2150 QualType Ty) const override;
2152 bool isHomogeneousAggregateBaseType(QualType Ty) const override {
2153 // FIXME: Assumes vectorcall is in use.
2154 return isX86VectorTypeForVectorCall(getContext(), Ty);
2155 }
2157 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
2158 uint64_t NumMembers) const override {
2159 // FIXME: Assumes vectorcall is in use.
2160 return isX86VectorCallAggregateSmallEnough(NumMembers);
2161 }
2163 bool shouldPassIndirectlyForSwift(CharUnits totalSize,
2164 ArrayRef<llvm::Type *> scalars,
2165 bool asReturnValue) const override {
2166 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
2167 }
2169 bool isSwiftErrorInRegister() const override {
2170 return true;
2171 }
2173 private:
2174 ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs, bool IsReturnType,
2175 bool IsVectorCall, bool IsRegCall) const;
2176 ABIArgInfo reclassifyHvaArgType(QualType Ty, unsigned &FreeSSERegs,
2177 const ABIArgInfo &current) const;
2178 void computeVectorCallArgs(CGFunctionInfo &FI, unsigned FreeSSERegs,
2179 bool IsVectorCall, bool IsRegCall) const;
2181 bool IsMingw64;
2182 };
2184 class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
2185 public:
2186 X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
2187 : TargetCodeGenInfo(new X86_64ABIInfo(CGT, AVXLevel)) {}
2189 const X86_64ABIInfo &getABIInfo() const {
2190 return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
2191 }
2193 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
2194 return 7;
2195 }
2197 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2198 llvm::Value *Address) const override {
2199 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
2201 // 0-15 are the 16 integer registers.
2202 // 16 is %rip.
2203 AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
2204 return false;
2205 }
2207 llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
2208 StringRef Constraint,
2209 llvm::Type* Ty) const override {
2210 return X86AdjustInlineAsmType(CGF, Constraint, Ty);
2211 }
2213 bool isNoProtoCallVariadic(const CallArgList &args,
2214 const FunctionNoProtoType *fnType) const override {
2215 // The default CC on x86-64 sets %al to the number of SSE
2216 // registers used, and GCC sets this when calling an unprototyped
2217 // function, so we override the default behavior. However, don't do
2218 // that when AVX types are involved: the ABI explicitly states it is
2219 // undefined, and it doesn't work in practice because of how the ABI
2220 // defines varargs anyway.
2221 if (fnType->getCallConv() == CC_C) {
2222 bool HasAVXType = false;
2223 for (CallArgList::const_iterator
2224 it = args.begin(), ie = args.end(); it != ie; ++it) {
2225 if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
2226 HasAVXType = true;
2227 break;
2228 }
2229 }
2231 if (!HasAVXType)
2232 return true;
2233 }
2235 return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
2236 }
2238 llvm::Constant *
2239 getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
2240 unsigned Sig;
2241 if (getABIInfo().has64BitPointers())
2242 Sig = (0xeb << 0) | // jmp rel8
2243 (0x0a << 8) | // .+0x0c
2244 ('F' << 16) |
2245 ('T' << 24);
2246 else
2247 Sig = (0xeb << 0) | // jmp rel8
2248 (0x06 << 8) | // .+0x08
2249 ('F' << 16) |
2250 ('T' << 24);
2251 return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
2252 }
2254 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2255 CodeGen::CodeGenModule &CGM) const override {
2256 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2257 if (FD->hasAttr<AnyX86InterruptAttr>()) {
2258 llvm::Function *Fn = cast<llvm::Function>(GV);
2259 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
2260 }
2261 }
2262 }
2263 };
2265 class PS4TargetCodeGenInfo : public X86_64TargetCodeGenInfo {
2266 public:
2267 PS4TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
2268 : X86_64TargetCodeGenInfo(CGT, AVXLevel) {}
2270 void getDependentLibraryOption(llvm::StringRef Lib,
2271 llvm::SmallString<24> &Opt) const override {
2273 // If the argument contains a space, enclose it in quotes.
2274 if (Lib.find(" ") != StringRef::npos)
2275 Opt += "\"" + Lib.str() + "\"";
2276 else
2277 Opt += Lib;
2278 }
2279 };
2281 static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
2282 // If the argument does not end in .lib, automatically add the suffix.
2283 // If the argument contains a space, enclose it in quotes.
2284 // This matches the behavior of MSVC.
2285 bool Quote = (Lib.find(" ") != StringRef::npos);
2286 std::string ArgStr = Quote ? "\"" : "";
2287 ArgStr += Lib;
2288 if (!Lib.endswith_lower(".lib"))
2289 ArgStr += ".lib";
2290 ArgStr += Quote ? "\"" : "";
2291 return ArgStr;
2292 }
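// Editor's note -- illustrative results, not part of the original file:
//   qualifyWindowsLibrary("uuid")    == "uuid.lib"
//   qualifyWindowsLibrary("my deps") == "\"my deps.lib\""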
2294 class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo {
2295 public:
2296 WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
2297 bool DarwinVectorABI, bool RetSmallStructInRegABI, bool Win32StructABI,
2298 unsigned NumRegisterParameters)
2299 : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI,
2300 Win32StructABI, NumRegisterParameters, false) {}
2302 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2303 CodeGen::CodeGenModule &CGM) const override;
2305 void getDependentLibraryOption(llvm::StringRef Lib,
2306 llvm::SmallString<24> &Opt) const override {
2307 Opt = "/DEFAULTLIB:";
2308 Opt += qualifyWindowsLibrary(Lib);
2309 }
2311 void getDetectMismatchOption(llvm::StringRef Name,
2312 llvm::StringRef Value,
2313 llvm::SmallString<32> &Opt) const override {
2314 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
2315 }
2316 };
2318 static void addStackProbeSizeTargetAttribute(const Decl *D,
2319 llvm::GlobalValue *GV,
2320 CodeGen::CodeGenModule &CGM) {
2321 if (D && isa<FunctionDecl>(D)) {
2322 if (CGM.getCodeGenOpts().StackProbeSize != 4096) {
2323 llvm::Function *Fn = cast<llvm::Function>(GV);
2325 Fn->addFnAttr("stack-probe-size",
2326 llvm::utostr(CGM.getCodeGenOpts().StackProbeSize));
2327 }
2328 }
2329 }
2331 void WinX86_32TargetCodeGenInfo::setTargetAttributes(const Decl *D,
2332 llvm::GlobalValue *GV,
2333 CodeGen::CodeGenModule &CGM) const {
2334 X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
2336 addStackProbeSizeTargetAttribute(D, GV, CGM);
2337 }
2339 class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
2340 public:
2341 WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
2342 X86AVXABILevel AVXLevel)
2343 : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {}
2345 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2346 CodeGen::CodeGenModule &CGM) const override;
2348 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
2349 return 7;
2350 }
2352 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2353 llvm::Value *Address) const override {
2354 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
2356 // 0-15 are the 16 integer registers.
2357 // 16 is %rip.
2358 AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
2359 return false;
2360 }
2362 void getDependentLibraryOption(llvm::StringRef Lib,
2363 llvm::SmallString<24> &Opt) const override {
2364 Opt = "/DEFAULTLIB:";
2365 Opt += qualifyWindowsLibrary(Lib);
2366 }
2368 void getDetectMismatchOption(llvm::StringRef Name,
2369 llvm::StringRef Value,
2370 llvm::SmallString<32> &Opt) const override {
2371 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
2372 }
2373 };
2375 void WinX86_64TargetCodeGenInfo::setTargetAttributes(const Decl *D,
2376 llvm::GlobalValue *GV,
2377 CodeGen::CodeGenModule &CGM) const {
2378 TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
2380 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2381 if (FD->hasAttr<AnyX86InterruptAttr>()) {
2382 llvm::Function *Fn = cast<llvm::Function>(GV);
2383 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
2384 }
2385 }
2387 addStackProbeSizeTargetAttribute(D, GV, CGM);
2388 }
2391 void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
2392 Class &Hi) const {
2393 // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
2395 // (a) If one of the classes is Memory, the whole argument is passed in
2396 // memory.
2398 // (b) If X87UP is not preceded by X87, the whole argument is passed in
2399 // memory.
2401 // (c) If the size of the aggregate exceeds two eightbytes and the first
2402 // eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
2403 // argument is passed in memory. NOTE: This is necessary to keep the
2404 // ABI working for processors that don't support the __m256 type.
2406 // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
2408 // Some of these are enforced by the merging logic. Others can arise
2409 // only with unions; for example:
2410 // union { _Complex double; unsigned; }
2412 // Note that clauses (b) and (c) were added in 0.98.
2414 if (Hi == Memory)
2415 Lo = Memory;
2416 if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
2417 Lo = Memory;
2418 if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
2419 Lo = Memory;
2420 if (Hi == SSEUp && Lo != SSE)
2421 Hi = SSE;
2422 }
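// Editor's note -- a worked example, not part of the original file.
// Assuming the x87 80-bit long double format, classifying
//   union U { long double ld; unsigned u; };
// leaves Lo = Integer (the merge of X87 with INTEGER) and Hi = X87Up.
// Clause (b) then fires: X87Up is no longer preceded by X87, so on
// targets honoring revision 0.98 the whole union is demoted to MEMORY.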
2424 X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
2425 // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
2426 // classified recursively so that always two fields are
2427 // considered. The resulting class is calculated according to
2428 // the classes of the fields in the eightbyte:
2430 // (a) If both classes are equal, this is the resulting class.
2432 // (b) If one of the classes is NO_CLASS, the resulting class is
2435 // (c) If one of the classes is MEMORY, the result is the MEMORY
2438 // (d) If one of the classes is INTEGER, the result is the
2441 // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
2442 // MEMORY is used as class.
2444 // (f) Otherwise class SSE is used.
2446 // Accum should never be memory (we should have returned) or
2447 // ComplexX87 (because this cannot be passed in a structure).
2448 assert((Accum != Memory && Accum != ComplexX87) &&
2449 "Invalid accumulated classification during merge.");
2450 if (Accum == Field || Field == NoClass)
2451 return Accum;
2452 if (Field == Memory)
2453 return Memory;
2454 if (Accum == NoClass)
2455 return Field;
2456 if (Accum == Integer || Field == Integer)
2457 return Integer;
2458 if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
2459 Accum == X87 || Accum == X87Up)
2460 return Memory;
2461 return SSE;
2462 }
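// Editor's note -- an illustrative application, not part of the original
// file. For
//   struct S { int i; float f; };
// both fields share the single low eightbyte: 'i' classifies as INTEGER,
// 'f' as SSE, and rule (d) merges them to INTEGER, so S travels in one
// GPR.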
2464 void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
2465 Class &Lo, Class &Hi, bool isNamedArg) const {
2466 // FIXME: This code can be simplified by introducing a simple value class for
2467 // Class pairs with appropriate constructor methods for the various
2468 // situations.
2470 // FIXME: Some of the split computations are wrong; unaligned vectors
2471 // shouldn't be passed in registers for example, so there is no chance they
2472 // can straddle an eightbyte. Verify & simplify.
2474 Lo = Hi = NoClass;
2476 Class &Current = OffsetBase < 64 ? Lo : Hi;
2477 Current = Memory;
2479 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
2480 BuiltinType::Kind k = BT->getKind();
2482 if (k == BuiltinType::Void) {
2483 Current = NoClass;
2484 } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
2485 Lo = Integer;
2486 Hi = Integer;
2487 } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
2488 Current = Integer;
2489 } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
2490 Current = SSE;
2491 } else if (k == BuiltinType::LongDouble) {
2492 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
2493 if (LDF == &llvm::APFloat::IEEEquad()) {
2494 Lo = SSE;
2495 Hi = SSEUp;
2496 } else if (LDF == &llvm::APFloat::x87DoubleExtended()) {
2497 Lo = X87;
2498 Hi = X87Up;
2499 } else if (LDF == &llvm::APFloat::IEEEdouble()) {
2500 Current = SSE;
2501 } else
2502 llvm_unreachable("unexpected long double representation!");
2503 }
2504 // FIXME: _Decimal32 and _Decimal64 are SSE.
2505 // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
2506 return;
2507 }
2509 if (const EnumType *ET = Ty->getAs<EnumType>()) {
2510 // Classify the underlying integer type.
2511 classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
2515 if (Ty->hasPointerRepresentation()) {
2516 Current = Integer;
2517 return;
2518 }
2520 if (Ty->isMemberPointerType()) {
2521 if (Ty->isMemberFunctionPointerType()) {
2522 if (Has64BitPointers) {
2523 // If Has64BitPointers, this is an {i64, i64}, so classify both
2524 // Lo and Hi now.
2525 Lo = Hi = Integer;
2526 } else {
2527 // Otherwise, with 32-bit pointers, this is an {i32, i32}. If that
2528 // straddles an eightbyte boundary, Hi should be classified as well.
2529 uint64_t EB_FuncPtr = (OffsetBase) / 64;
2530 uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;
2531 if (EB_FuncPtr != EB_ThisAdj) {
2532 Lo = Hi = Integer;
2533 } else {
2534 Current = Integer;
2535 }
2536 }
2537 } else {
2538 Current = Integer;
2539 }
2540 return;
2541 }
2543 if (const VectorType *VT = Ty->getAs<VectorType>()) {
2544 uint64_t Size = getContext().getTypeSize(VT);
2545 if (Size == 1 || Size == 8 || Size == 16 || Size == 32) {
2546 // gcc passes the following as integer:
2547 // 4 bytes - <4 x char>, <2 x short>, <1 x int>, <1 x float>
2548 // 2 bytes - <2 x char>, <1 x short>
2549 // 1 byte - <1 x char>
2550 Current = Integer;
2552 // If this type crosses an eightbyte boundary, it should be
2553 // split.
2554 uint64_t EB_Lo = (OffsetBase) / 64;
2555 uint64_t EB_Hi = (OffsetBase + Size - 1) / 64;
2556 if (EB_Lo != EB_Hi)
2557 Hi = Lo;
2558 } else if (Size == 64) {
2559 QualType ElementType = VT->getElementType();
2561 // gcc passes <1 x double> in memory. :(
2562 if (ElementType->isSpecificBuiltinType(BuiltinType::Double))
2563 return;
2565 // gcc passes <1 x long long> as SSE but clang used to unconditionally
2566 // pass them as integer. For platforms where clang is the de facto
2567 // platform compiler, we must continue to use integer.
2568 if (!classifyIntegerMMXAsSSE() &&
2569 (ElementType->isSpecificBuiltinType(BuiltinType::LongLong) ||
2570 ElementType->isSpecificBuiltinType(BuiltinType::ULongLong) ||
2571 ElementType->isSpecificBuiltinType(BuiltinType::Long) ||
2572 ElementType->isSpecificBuiltinType(BuiltinType::ULong)))
2573 Current = Integer;
2574 else
2575 Current = SSE;
2577 // If this type crosses an eightbyte boundary, it should be
2578 // split.
2579 if (OffsetBase && OffsetBase != 64)
2580 Hi = Lo;
2581 } else if (Size == 128 ||
2582 (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) {
2583 // Arguments of 256-bits are split into four eightbyte chunks. The
2584 // least significant one belongs to class SSE and all the others to class
2585 // SSEUP. The original Lo and Hi design considers that types can't be
2586 // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense.
2587 // This design isn't correct for 256-bits, but since there are no cases
2588 // where the upper parts would need to be inspected, avoid adding
2589 // complexity and just consider Hi to match the 64-256 part.
2591 // Note that per 3.5.7 of AMD64-ABI, 256-bit args are only passed in
2592 // registers if they are "named", i.e. not part of the "..." of a
2593 // variadic function.
2595 // Similarly, per 3.2.3. of the AVX512 draft, 512-bits ("named") args are
2596 // split into eight eightbyte chunks, one SSE and seven SSEUP.
2597 Lo = SSE;
2598 Hi = SSEUp;
2599 }
2600 return;
2601 }
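// Editor's note -- an illustrative consequence, not part of the original
// file: a named 256-bit __m256 argument classifies as Lo = SSE,
// Hi = SSEUp when AVXLevel is at least AVX and stays in one YMM register,
// while the same value reaching the '...' of a variadic call is unnamed,
// takes none of the branches above, and keeps the default Memory class.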
2603 if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
2604 QualType ET = getContext().getCanonicalType(CT->getElementType());
2606 uint64_t Size = getContext().getTypeSize(Ty);
2607 if (ET->isIntegralOrEnumerationType()) {
2608 if (Size <= 64)
2609 Current = Integer;
2610 else if (Size <= 128)
2611 Lo = Hi = Integer;
2612 } else if (ET == getContext().FloatTy) {
2613 Current = SSE;
2614 } else if (ET == getContext().DoubleTy) {
2615 Lo = Hi = SSE;
2616 } else if (ET == getContext().LongDoubleTy) {
2617 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
2618 if (LDF == &llvm::APFloat::IEEEquad())
2619 Current = Memory;
2620 else if (LDF == &llvm::APFloat::x87DoubleExtended())
2621 Current = ComplexX87;
2622 else if (LDF == &llvm::APFloat::IEEEdouble())
2623 Lo = Hi = SSE;
2624 else
2625 llvm_unreachable("unexpected long double representation!");
2626 }
2628 // If this complex type crosses an eightbyte boundary then it
2629 // should be split.
2630 uint64_t EB_Real = (OffsetBase) / 64;
2631 uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
2632 if (Hi == NoClass && EB_Real != EB_Imag)
2633 Hi = Lo;
2635 return;
2636 }
2638 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
2639 // Arrays are treated like structures.
2641 uint64_t Size = getContext().getTypeSize(Ty);
2643 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
2644 // than eight eightbytes, ..., it has class MEMORY.
2645 if (Size > 512)
2646 return;
2648 // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
2649 // fields, it has class MEMORY.
2651 // Only need to check alignment of array base.
2652 if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
2653 return;
2655 // Otherwise implement simplified merge. We could be smarter about
2656 // this, but it isn't worth it and would be harder to verify.
2657 Current = NoClass;
2658 uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
2659 uint64_t ArraySize = AT->getSize().getZExtValue();
2661 // The only case a 256-bit wide vector could be used is when the array
2662 // contains a single 256-bit element. Since Lo and Hi logic isn't extended
2663 // to work for sizes wider than 128, early check and fallback to memory.
2665 if (Size > 128 &&
2666 (Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel)))
2667 return;
2669 for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
2670 Class FieldLo, FieldHi;
2671 classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);
2672 Lo = merge(Lo, FieldLo);
2673 Hi = merge(Hi, FieldHi);
2674 if (Lo == Memory || Hi == Memory)
2675 break;
2676 }
2678 postMerge(Size, Lo, Hi);
2679 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
2680 return;
2681 }
2683 if (const RecordType *RT = Ty->getAs<RecordType>()) {
2684 uint64_t Size = getContext().getTypeSize(Ty);
2686 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
2687 // than eight eightbytes, ..., it has class MEMORY.
2688 if (Size > 512)
2689 return;
2691 // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
2692 // copy constructor or a non-trivial destructor, it is passed by invisible
2693 // reference.
2694 if (getRecordArgABI(RT, getCXXABI()))
2695 return;
2697 const RecordDecl *RD = RT->getDecl();
2699 // Assume variable sized types are passed in memory.
2700 if (RD->hasFlexibleArrayMember())
2701 return;
2703 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
2705 // Reset Lo class, this will be recomputed.
2706 Current = NoClass;
2708 // If this is a C++ record, classify the bases first.
2709 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
2710 for (const auto &I : CXXRD->bases()) {
2711 assert(!I.isVirtual() && !I.getType()->isDependentType() &&
2712 "Unexpected base class!");
2713 const CXXRecordDecl *Base =
2714 cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
2716 // Classify this field.
2718 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
2719 // single eightbyte, each is classified separately. Each eightbyte gets
2720 // initialized to class NO_CLASS.
2721 Class FieldLo, FieldHi;
2722 uint64_t Offset =
2723 OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
2724 classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg);
2725 Lo = merge(Lo, FieldLo);
2726 Hi = merge(Hi, FieldHi);
2727 if (Lo == Memory || Hi == Memory) {
2728 postMerge(Size, Lo, Hi);
2729 return;
2730 }
2731 }
2732 }
2734 // Classify the fields one at a time, merging the results.
2735 unsigned idx = 0;
2736 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
2737 i != e; ++i, ++idx) {
2738 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
2739 bool BitField = i->isBitField();
2741 // Ignore padding bit-fields.
2742 if (BitField && i->isUnnamedBitfield())
2743 continue;
2745 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
2746 // four eightbytes, or it contains unaligned fields, it has class MEMORY.
2748 // The only case a 256-bit wide vector could be used is when the struct
2749 // contains a single 256-bit element. Since Lo and Hi logic isn't extended
2750 // to work for sizes wider than 128, early check and fallback to memory.
2752 if (Size > 128 && (Size != getContext().getTypeSize(i->getType()) ||
2753 Size > getNativeVectorSizeForAVXABI(AVXLevel))) {
2754 Lo = Memory;
2755 postMerge(Size, Lo, Hi);
2756 return;
2757 }
2758 // Note, skip this test for bit-fields, see below.
2759 if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
2760 Lo = Memory;
2761 postMerge(Size, Lo, Hi);
2762 return;
2763 }
2765 // Classify this field.
2767 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
2768 // exceeds a single eightbyte, each is classified
2769 // separately. Each eightbyte gets initialized to class
2770 // NO_CLASS.
2771 Class FieldLo, FieldHi;
2773 // Bit-fields require special handling, they do not force the
2774 // structure to be passed in memory even if unaligned, and
2775 // therefore they can straddle an eightbyte.
2776 if (BitField) {
2777 assert(!i->isUnnamedBitfield());
2778 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
2779 uint64_t Size = i->getBitWidthValue(getContext());
2781 uint64_t EB_Lo = Offset / 64;
2782 uint64_t EB_Hi = (Offset + Size - 1) / 64;
2784 if (EB_Lo) {
2785 assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
2786 FieldLo = NoClass;
2787 FieldHi = Integer;
2788 } else {
2789 FieldLo = Integer;
2790 FieldHi = EB_Hi ? Integer : NoClass;
2791 }
2792 } else
2793 classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
2794 Lo = merge(Lo, FieldLo);
2795 Hi = merge(Hi, FieldHi);
2796 if (Lo == Memory || Hi == Memory)
2797 break;
2798 }
2800 postMerge(Size, Lo, Hi);
2801 }
2802 }
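// Editor's note -- a worked classification, not part of the original file:
//   struct S { double d; long l; };   // 16 bytes
// yields Lo = SSE (the 'd' eightbyte) and Hi = Integer (the 'l'
// eightbyte), so S is passed in one XMM register plus one GPR.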
2804 ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
2805 // If this is a scalar LLVM value then assume LLVM will pass it in the right
2806 // place naturally.
2807 if (!isAggregateTypeForABI(Ty)) {
2808 // Treat an enum type as its underlying type.
2809 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2810 Ty = EnumTy->getDecl()->getIntegerType();
2812 return (Ty->isPromotableIntegerType() ?
2813 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
2814 }
2816 return getNaturalAlignIndirect(Ty);
2817 }
2819 bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
2820 if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
2821 uint64_t Size = getContext().getTypeSize(VecTy);
2822 unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel);
2823 if (Size <= 64 || Size > LargestVector)
2824 return true;
2825 }
2827 return false;
2828 }
2830 ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
2831 unsigned freeIntRegs) const {
2832 // If this is a scalar LLVM value then assume LLVM will pass it in the right
2833 // place naturally.
2835 // This assumption is optimistic, as there could be free registers available
2836 // when we need to pass this argument in memory, and LLVM could try to pass
2837 // the argument in the free register. This does not seem to happen currently,
2838 // but this code would be much safer if we could mark the argument with
2839 // 'onstack'. See PR12193.
2840 if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) {
2841 // Treat an enum type as its underlying type.
2842 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2843 Ty = EnumTy->getDecl()->getIntegerType();
2845 return (Ty->isPromotableIntegerType() ?
2846 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
2847 }
2849 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
2850 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
2852 // Compute the byval alignment. We specify the alignment of the byval in all
2853 // cases so that the mid-level optimizer knows the alignment of the byval.
2854 unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);
2856 // Attempt to avoid passing indirect results using byval when possible. This
2857 // is important for good codegen.
2859 // We do this by coercing the value into a scalar type which the backend can
2860 // handle naturally (i.e., without using byval).
2862 // For simplicity, we currently only do this when we have exhausted all of the
2863 // free integer registers. Doing this when there are free integer registers
2864 // would require more care, as we would have to ensure that the coerced value
2865 // did not claim the unused register. That would require either reordering the
2866 // arguments to the function (so that any subsequent inreg values came first),
2867 // or only doing this optimization when there were no following arguments that
2868 // might be inreg.
2870 // We currently expect it to be rare (particularly in well written code) for
2871 // arguments to be passed on the stack when there are still free integer
2872 // registers available (this would typically imply large structs being passed
2873 // by value), so this seems like a fair tradeoff for now.
2875 // We can revisit this if the backend grows support for 'onstack' parameter
2876 // attributes. See PR12193.
2877 if (freeIntRegs == 0) {
2878 uint64_t Size = getContext().getTypeSize(Ty);
2880 // If this type fits in an eightbyte, coerce it into the matching integral
2881 // type, which will end up on the stack (with alignment 8).
2882 if (Align == 8 && Size <= 64)
2883 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
2884 Size));
2885 }
2887 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(Align));
2888 }
2890 /// The ABI specifies that a value should be passed in a full vector XMM/YMM
2891 /// register. Pick an LLVM IR type that will be passed as a vector register.
2892 llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
2893 // Wrapper structs/arrays that only contain vectors are passed just like
2894 // vectors; strip them off if present.
2895 if (const Type *InnerTy = isSingleElementStruct(Ty, getContext()))
2896 Ty = QualType(InnerTy, 0);
2898 llvm::Type *IRType = CGT.ConvertType(Ty);
2899 if (isa<llvm::VectorType>(IRType) ||
2900 IRType->getTypeID() == llvm::Type::FP128TyID)
2901 return IRType;
2903 // We couldn't find the preferred IR vector type for 'Ty'.
2904 uint64_t Size = getContext().getTypeSize(Ty);
2905 assert((Size == 128 || Size == 256 || Size == 512) && "Invalid type found!");
2907 // Return a LLVM IR vector type based on the size of 'Ty'.
2908 return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()),
2909 Size / 64);
2910 }
2912 /// BitsContainNoUserData - Return true if the specified [start,end) bit range
2913 /// is known to either be off the end of the specified type or being in
2914 /// alignment padding. The user type specified is known to be at most 128 bits
2915 /// in size, and have passed through X86_64ABIInfo::classify with a successful
2916 /// classification that put one of the two halves in the INTEGER class.
2918 /// It is conservatively correct to return false.
2919 static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
2920 unsigned EndBit, ASTContext &Context) {
2921 // If the bytes being queried are off the end of the type, there is no user
2922 // data hiding here. This handles analysis of builtins, vectors and other
2923 // types that don't contain interesting padding.
2924 unsigned TySize = (unsigned)Context.getTypeSize(Ty);
2925 if (TySize <= StartBit)
2926 return true;
2928 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
2929 unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
2930 unsigned NumElts = (unsigned)AT->getSize().getZExtValue();
2932 // Check each element to see if the element overlaps with the queried range.
2933 for (unsigned i = 0; i != NumElts; ++i) {
2934 // If the element is after the span we care about, then we're done..
2935 unsigned EltOffset = i*EltSize;
2936 if (EltOffset >= EndBit) break;
2938 unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0;
2939 if (!BitsContainNoUserData(AT->getElementType(), EltStart,
2940 EndBit-EltOffset, Context))
2941 return false;
2942 }
2943 // If it overlaps no elements, then it is safe to process as padding.
2944 return true;
2945 }
2947 if (const RecordType *RT = Ty->getAs<RecordType>()) {
2948 const RecordDecl *RD = RT->getDecl();
2949 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
2951 // If this is a C++ record, check the bases first.
2952 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
2953 for (const auto &I : CXXRD->bases()) {
2954 assert(!I.isVirtual() && !I.getType()->isDependentType() &&
2955 "Unexpected base class!");
2956 const CXXRecordDecl *Base =
2957 cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
2959 // If the base is after the span we care about, ignore it.
2960 unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
2961 if (BaseOffset >= EndBit) continue;
2963 unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
2964 if (!BitsContainNoUserData(I.getType(), BaseStart,
2965 EndBit-BaseOffset, Context))
2966 return false;
2967 }
2968 }
2970 // Verify that no field has data that overlaps the region of interest. Yes
2971 // this could be sped up a lot by being smarter about queried fields,
2972 // however we're only looking at structs up to 16 bytes, so we don't care
2973 // much.
2974 unsigned idx = 0;
2975 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
2976 i != e; ++i, ++idx) {
2977 unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);
2979 // If we found a field after the region we care about, then we're done.
2980 if (FieldOffset >= EndBit) break;
2982 unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;
2983 if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset,
2984 Context))
2985 return false;
2986 }
2988 // If nothing in this record overlapped the area of interest, then we're
2989 // clean.
2990 return true;
2991 }
2993 return false;
2994 }
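// Editor's note -- an illustrative call, not part of the original file:
// for struct S { float a, b, c; } (96 bits of data),
// BitsContainNoUserData(S, 96, 128, Ctx) returns true because the queried
// range lies entirely off the end of the type; this is what lets the
// second eightbyte of S be passed as a lone float below.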
2996 /// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a
2997 /// float member at the specified offset. For example, {int,{float}} has a
2998 // float at offset 4. It is conservatively correct for this routine to return
2999 // false.
3000 static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset,
3001 const llvm::DataLayout &TD) {
3002 // Base case if we find a float.
3003 if (IROffset == 0 && IRType->isFloatTy())
3004 return true;
3006 // If this is a struct, recurse into the field at the specified offset.
3007 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
3008 const llvm::StructLayout *SL = TD.getStructLayout(STy);
3009 unsigned Elt = SL->getElementContainingOffset(IROffset);
3010 IROffset -= SL->getElementOffset(Elt);
3011 return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD);
3014 // If this is an array, recurse into the field at the specified offset.
3015 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
3016 llvm::Type *EltTy = ATy->getElementType();
3017 unsigned EltSize = TD.getTypeAllocSize(EltTy);
3018 IROffset -= IROffset/EltSize*EltSize;
3019 return ContainsFloatAtOffset(EltTy, IROffset, TD);
3020 }
3022 return false;
3023 }
3026 /// GetSSETypeAtOffset - Return a type that will be passed by the backend in the
3027 /// low 8 bytes of an XMM register, corresponding to the SSE class.
3028 llvm::Type *X86_64ABIInfo::
3029 GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
3030 QualType SourceTy, unsigned SourceOffset) const {
3031 // The only three choices we have are double, <2 x float>, or float. We
3032 // pass as float if the last 4 bytes are just padding. This happens for
3033 // structs that contain 3 floats.
3034 if (BitsContainNoUserData(SourceTy, SourceOffset*8+32,
3035 SourceOffset*8+64, getContext()))
3036 return llvm::Type::getFloatTy(getVMContext());
3038 // We want to pass as <2 x float> if the LLVM IR type contains a float at
3039 // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the
3040 // case.
3041 if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) &&
3042 ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout()))
3043 return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2);
3045 return llvm::Type::getDoubleTy(getVMContext());
3046 }
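// Editor's note -- a worked example, not part of the original file: for
// struct { float a, b, c; } the low eightbyte has floats at offsets 0 and
// 4 and becomes <2 x float>, while the high eightbyte holds only 'c' plus
// tail padding and becomes float, giving the coercion type
// { <2 x float>, float }.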
3049 /// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in
3050 /// an 8-byte GPR. This means that we either have a scalar or we are talking
3051 /// about the high or low part of an up-to-16-byte struct. This routine picks
3052 /// the best LLVM IR type to represent this, which may be i64 or may be anything
3053 /// else that the backend will pass in a GPR that works better (e.g. i8, %foo*,
3054 /// etc).
3056 /// PrefType is an LLVM IR type that corresponds to (part of) the IR type for
3057 /// the source type. IROffset is an offset in bytes into the LLVM IR type that
3058 /// the 8-byte value references. PrefType may be null.
3060 /// SourceTy is the source-level type for the entire argument. SourceOffset is
3061 /// an offset into this that we're processing (which is always either 0 or 8).
3063 llvm::Type *X86_64ABIInfo::
3064 GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
3065 QualType SourceTy, unsigned SourceOffset) const {
3066 // If we're dealing with an un-offset LLVM IR type, then it means that we're
3067 // returning an 8-byte unit starting with it. See if we can safely use it.
3068 if (IROffset == 0) {
3069 // Pointers and int64's always fill the 8-byte unit.
3070 if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
3071 IRType->isIntegerTy(64))
3072 return IRType;
3074 // If we have a 1/2/4-byte integer, we can use it only if the rest of the
3075 // goodness in the source type is just tail padding. This is allowed to
3076 // kick in for struct {double,int} on the int, but not on
3077 // struct{double,int,int} because we wouldn't return the second int. We
3078 // have to do this analysis on the source type because we can't depend on
3079 // unions being lowered a specific way etc.
3080 if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
3081 IRType->isIntegerTy(32) ||
3082 (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
3083 unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
3084 cast<llvm::IntegerType>(IRType)->getBitWidth();
3086 if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
3087 SourceOffset*8+64, getContext()))
3088 return IRType;
3089 }
3090 }
3092 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
3093 // If this is a struct, recurse into the field at the specified offset.
3094 const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
3095 if (IROffset < SL->getSizeInBytes()) {
3096 unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
3097 IROffset -= SL->getElementOffset(FieldIdx);
3099 return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
3100 SourceTy, SourceOffset);
3101 }
3102 }
3104 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
3105 llvm::Type *EltTy = ATy->getElementType();
3106 unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
3107 unsigned EltOffset = IROffset/EltSize*EltSize;
3108 return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
3109 SourceOffset);
3110 }
3112 // Okay, we don't have any better idea of what to pass, so we pass this in an
3113 // integer register that isn't too big to fit the rest of the struct.
3114 unsigned TySizeInBytes =
3115 (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
3117 assert(TySizeInBytes != SourceOffset && "Empty field?");
3119 // It is always safe to classify this as an integer type up to i64 that
3120 // isn't larger than the structure.
3121 return llvm::IntegerType::get(getVMContext(),
3122 std::min(TySizeInBytes-SourceOffset, 8U)*8);
3123 }
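// Editor's note -- a worked example, not part of the original file: for
//   struct S { double d; int i; };   // sizeof == 16, 4 bytes tail padding
// the high eightbyte is lowered by this routine to i32 rather than i64,
// because BitsContainNoUserData reports bits [96, 128) as padding; S is
// therefore passed as { double, i32 }.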
3126 /// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
3127 /// be used as elements of a two register pair to pass or return, return a
3128 /// first class aggregate to represent them. For example, if the low part of
3129 /// a by-value argument should be passed as i32* and the high part as float,
3130 /// return {i32*, float}.
3131 static llvm::Type *
3132 GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
3133 const llvm::DataLayout &TD) {
3134 // In order to correctly satisfy the ABI, we need the high part to start
3135 // at offset 8. If the high and low parts we inferred are both 4-byte types
3136 // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
3137 // the second element at offset 8. Check for this:
3138 unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
3139 unsigned HiAlign = TD.getABITypeAlignment(Hi);
3140 unsigned HiStart = llvm::alignTo(LoSize, HiAlign);
3141 assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
3143 // To handle this, we have to increase the size of the low part so that the
3144 // second element will start at an 8 byte offset. We can't increase the size
3145 // of the second element because it might make us access off the end of the
3146 // struct.
3147 if (HiStart != 8) {
3148 // There are usually two sorts of types the ABI generation code can produce
3149 // for the low part of a pair that aren't 8 bytes in size: float or
3150 // i8/i16/i32. This can also include pointers when they are 32-bit (X32 and
3151 // NaCl).
3152 // Promote these to a larger type.
3153 if (Lo->isFloatTy())
3154 Lo = llvm::Type::getDoubleTy(Lo->getContext());
3155 else {
3156 assert((Lo->isIntegerTy() || Lo->isPointerTy())
3157 && "Invalid/unknown lo type");
3158 Lo = llvm::Type::getInt64Ty(Lo->getContext());
3159 }
3160 }
3162 llvm::StructType *Result = llvm::StructType::get(Lo, Hi, nullptr);
3165 // Verify that the second element is at an 8-byte offset.
3166 assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
3167 "Invalid x86-64 argument pair!");
3171 ABIArgInfo X86_64ABIInfo::
3172 classifyReturnType(QualType RetTy) const {
3173 // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
3174 // classification algorithm.
3175 X86_64ABIInfo::Class Lo, Hi;
3176 classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true);
3178 // Check some invariants.
3179 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
3180 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
3182 llvm::Type *ResType = nullptr;
3183 switch (Lo) {
3184 case NoClass:
3185 if (Hi == NoClass)
3186 return ABIArgInfo::getIgnore();
3187 // If the low part is just padding, it takes no register, leave ResType
3188 // null.
3189 assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
3190 "Unknown missing lo part");
3191 break;
3193 case SSEUp:
3194 case X87Up:
3195 llvm_unreachable("Invalid classification for lo word.");
3197 // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
3198 // hidden argument.
3199 case Memory:
3200 return getIndirectReturnResult(RetTy);
3202 // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
3203 // available register of the sequence %rax, %rdx is used.
3204 case Integer:
3205 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
3207 // If we have a sign or zero extended integer, make sure to return Extend
3208 // so that the parameter gets the right LLVM IR attributes.
3209 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
3210 // Treat an enum type as its underlying type.
3211 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
3212 RetTy = EnumTy->getDecl()->getIntegerType();
3214 if (RetTy->isIntegralOrEnumerationType() &&
3215 RetTy->isPromotableIntegerType())
3216 return ABIArgInfo::getExtend();
3217 }
3218 break;
3220 // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
3221 // available SSE register of the sequence %xmm0, %xmm1 is used.
3222 case SSE:
3223 ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
3224 break;
3226 // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
3227 // returned on the X87 stack in %st0 as 80-bit x87 number.
3228 case X87:
3229 ResType = llvm::Type::getX86_FP80Ty(getVMContext());
3230 break;
3232 // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
3233 // part of the value is returned in %st0 and the imaginary part in
3234 // %st1.
3235 case ComplexX87:
3236 assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
3237 ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
3238 llvm::Type::getX86_FP80Ty(getVMContext()),
3239 nullptr);
3240 break;
3241 }
3243 llvm::Type *HighPart = nullptr;
3244 switch (Hi) {
3245 // Memory was handled previously and X87 should
3246 // never occur as a hi class.
3247 case Memory:
3248 case X87:
3249 llvm_unreachable("Invalid classification for hi word.");
3251 case ComplexX87: // Previously handled.
3252 case NoClass:
3253 break;
3255 case Integer:
3256 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
3257 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
3258 return ABIArgInfo::getDirect(HighPart, 8);
3259 break;
3260 case SSE:
3261 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
3262 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
3263 return ABIArgInfo::getDirect(HighPart, 8);
3264 break;
3266 // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
3267 // is passed in the next available eightbyte chunk if the last used
3268 // vector register.
3270 // SSEUP should always be preceded by SSE, just widen.
3271 case SSEUp:
3272 assert(Lo == SSE && "Unexpected SSEUp classification.");
3273 ResType = GetByteVectorType(RetTy);
3274 break;
3276 // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
3277 // returned together with the previous X87 value in %st0.
3278 case X87Up:
3279 // If X87Up is preceded by X87, we don't need to do
3280 // anything. However, in some cases with unions it may not be
3281 // preceded by X87. In such situations we follow gcc and pass the
3282 // extra bits in an SSE reg.
3283 if (Lo != X87) {
3284 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
3285 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
3286 return ABIArgInfo::getDirect(HighPart, 8);
3287 }
3288 break;
3289 }
3291 // If a high part was specified, merge it together with the low part. It is
3292 // known to pass in the high eightbyte of the result. We do this by forming a
3293 // first class struct aggregate with the high and low part: {low, high}
3294 if (HighPart)
3295 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
3297 return ABIArgInfo::getDirect(ResType);
3298 }
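// Editor's note -- a worked example, not part of the original file:
// returning struct { long l; double d; } classifies as Lo = Integer,
// Hi = SSE, so the coerced return type is { i64, double } and the value
// comes back in %rax and %xmm0.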
3300 ABIArgInfo X86_64ABIInfo::classifyArgumentType(
3301 QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE,
3302 bool isNamedArg) const {
3305 Ty = useFirstFieldIfTransparentUnion(Ty);
3307 X86_64ABIInfo::Class Lo, Hi;
3308 classify(Ty, 0, Lo, Hi, isNamedArg);
3310 // Check some invariants.
3311 // FIXME: Enforce these by construction.
3312 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
3313 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
3315 neededInt = 0;
3316 neededSSE = 0;
3317 llvm::Type *ResType = nullptr;
3318 switch (Lo) {
3319 case NoClass:
3320 if (Hi == NoClass)
3321 return ABIArgInfo::getIgnore();
3322 // If the low part is just padding, it takes no register, leave ResType
3323 // null.
3324 assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
3325 "Unknown missing lo part");
3326 break;
3328 // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
3329 // on the stack.
3330 case Memory:
3332 // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
3333 // COMPLEX_X87, it is passed in memory.
3334 case X87:
3335 case ComplexX87:
3336 if (getRecordArgABI(Ty, getCXXABI()) == CGCXXABI::RAA_Indirect)
3337 ++neededInt;
3338 return getIndirectResult(Ty, freeIntRegs);
3340 case SSEUp:
3341 case X87Up:
3342 llvm_unreachable("Invalid classification for lo word.");
3344 // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
3345 // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
3346 // and %r9 is used.
3347 case Integer:
3348 ++neededInt;
3350 // Pick an 8-byte type based on the preferred type.
3351 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);
3353 // If we have a sign or zero extended integer, make sure to return Extend
3354 // so that the parameter gets the right LLVM IR attributes.
3355 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
3356 // Treat an enum type as its underlying type.
3357 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3358 Ty = EnumTy->getDecl()->getIntegerType();
3360 if (Ty->isIntegralOrEnumerationType() &&
3361 Ty->isPromotableIntegerType())
3362 return ABIArgInfo::getExtend();
3363 }
3364 break;
3367 // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
3368 // available SSE register is used, the registers are taken in the
3369 // order from %xmm0 to %xmm7.
3370 case SSE: {
3371 llvm::Type *IRType = CGT.ConvertType(Ty);
3372 ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
3373 ++neededSSE;
3374 break;
3375 }
3376 }
3378 llvm::Type *HighPart = nullptr;
3379 switch (Hi) {
3380 // Memory was handled previously, ComplexX87 and X87 should
3381 // never occur as hi classes, and X87Up must be preceded by X87,
3382 // which is passed in memory.
3383 case Memory:
3384 case X87:
3385 case ComplexX87:
3386 llvm_unreachable("Invalid classification for hi word.");
3388 case NoClass: break;
3390 case Integer:
3391 ++neededInt;
3392 // Pick an 8-byte type based on the preferred type.
3393 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
3395 if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
3396 return ABIArgInfo::getDirect(HighPart, 8);
3397 break;
3399 // X87Up generally doesn't occur here (long double is passed in
3400 // memory), except in situations involving unions.
3401 case X87Up:
3402 case SSE:
3403 HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
3405 if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
3406 return ABIArgInfo::getDirect(HighPart, 8);
3408 ++neededSSE;
3409 break;
3411 // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
3412 // eightbyte is passed in the upper half of the last used SSE
3413 // register. This only happens when 128-bit vectors are passed.
3414 case SSEUp:
3415 assert(Lo == SSE && "Unexpected SSEUp classification");
3416 ResType = GetByteVectorType(Ty);
3417 break;
3418 }
3420 // If a high part was specified, merge it together with the low part. It is
3421 // known to pass in the high eightbyte of the result. We do this by forming a
3422 // first class struct aggregate with the high and low part: {low, high}
3423 if (HighPart)
3424 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
3426 return ABIArgInfo::getDirect(ResType);
3427 }
3429 ABIArgInfo
3430 X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
3431 unsigned &NeededSSE) const {
3432 auto RT = Ty->getAs<RecordType>();
3433 assert(RT && "classifyRegCallStructType only valid with struct types");
3435 if (RT->getDecl()->hasFlexibleArrayMember())
3436 return getIndirectReturnResult(Ty);
3439 if (auto CXXRD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
3440 if (CXXRD->isDynamicClass()) {
3441 NeededInt = NeededSSE = 0;
3442 return getIndirectReturnResult(Ty);
3443 }
3445 for (const auto &I : CXXRD->bases())
3446 if (classifyRegCallStructTypeImpl(I.getType(), NeededInt, NeededSSE)
3447 .isIndirect()) {
3448 NeededInt = NeededSSE = 0;
3449 return getIndirectReturnResult(Ty);
3450 }
3451 }
3453 // Sum up members
3454 for (const auto *FD : RT->getDecl()->fields()) {
3455 if (FD->getType()->isRecordType() && !FD->getType()->isUnionType()) {
3456 if (classifyRegCallStructTypeImpl(FD->getType(), NeededInt, NeededSSE)
3457 .isIndirect()) {
3458 NeededInt = NeededSSE = 0;
3459 return getIndirectReturnResult(Ty);
3460 }
3461 } else {
3462 unsigned LocalNeededInt, LocalNeededSSE;
3463 if (classifyArgumentType(FD->getType(), UINT_MAX, LocalNeededInt,
3464 LocalNeededSSE, true)
3465 .isIndirect()) {
3466 NeededInt = NeededSSE = 0;
3467 return getIndirectReturnResult(Ty);
3468 }
3469 NeededInt += LocalNeededInt;
3470 NeededSSE += LocalNeededSSE;
3471 }
3472 }
3474 return ABIArgInfo::getDirect();
3475 }
3477 ABIArgInfo X86_64ABIInfo::classifyRegCallStructType(QualType Ty,
3478 unsigned &NeededInt,
3479 unsigned &NeededSSE) const {
3480 NeededInt = 0;
3481 NeededSSE = 0;
3484 return classifyRegCallStructTypeImpl(Ty, NeededInt, NeededSSE);
3485 }
3487 void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
3489 bool IsRegCall = FI.getCallingConvention() == llvm::CallingConv::X86_RegCall;
3491 // Keep track of the number of assigned registers.
3492 unsigned FreeIntRegs = IsRegCall ? 11 : 6;
3493 unsigned FreeSSERegs = IsRegCall ? 16 : 8;
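// That is, regcall may place up to 11 integer and 16 SSE values in
// registers, versus the 6 GPRs (%rdi, %rsi, %rdx, %rcx, %r8, %r9) and
// 8 XMM registers of the standard SysV convention.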
3494 unsigned NeededInt, NeededSSE;
3496 if (IsRegCall && FI.getReturnType()->getTypePtr()->isRecordType() &&
3497 !FI.getReturnType()->getTypePtr()->isUnionType()) {
3498 FI.getReturnInfo() =
3499 classifyRegCallStructType(FI.getReturnType(), NeededInt, NeededSSE);
3500 if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
3501 FreeIntRegs -= NeededInt;
3502 FreeSSERegs -= NeededSSE;
3503 } else {
3504 FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
3505 }
3506 } else if (!getCXXABI().classifyReturnType(FI))
3507 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
3509 // If the return value is indirect, then the hidden argument is consuming one
3510 // integer register.
3511 if (FI.getReturnInfo().isIndirect())
3512 --FreeIntRegs;
3514 // The chain argument effectively gives us another free register.
3515 if (FI.isChainCall())
3516 ++FreeIntRegs;
3518 unsigned NumRequiredArgs = FI.getNumRequiredArgs();
3519 // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
3520 // get assigned (in left-to-right order) for passing as follows...
3521 unsigned ArgNo = 0;
3522 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
3523 it != ie; ++it, ++ArgNo) {
3524 bool IsNamedArg = ArgNo < NumRequiredArgs;
3526 if (IsRegCall && it->type->isStructureOrClassType())
3527 it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE);
3528 else
3529 it->info = classifyArgumentType(it->type, FreeIntRegs, NeededInt,
3530 NeededSSE, IsNamedArg);
3532 // AMD64-ABI 3.2.3p3: If there are no registers available for any
3533 // eightbyte of an argument, the whole argument is passed on the
3534 // stack. If registers have already been assigned for some
3535 // eightbytes of such an argument, the assignments get reverted.
3536 if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
3537 FreeIntRegs -= NeededInt;
3538 FreeSSERegs -= NeededSSE;
3539 } else {
3540 it->info = getIndirectResult(it->type, FreeIntRegs);
3541 }
3542 }
3543 }
3545 static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF,
3546 Address VAListAddr, QualType Ty) {
3547 Address overflow_arg_area_p = CGF.Builder.CreateStructGEP(
3548 VAListAddr, 2, CharUnits::fromQuantity(8), "overflow_arg_area_p");
3549 llvm::Value *overflow_arg_area =
3550 CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");
3552 // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
3553 // byte boundary if alignment needed by type exceeds 8 byte boundary.
3554 // It isn't stated explicitly in the standard, but in practice we use
3555 // alignment greater than 16 where necessary.
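// E.g. a 32-byte-aligned type (say, a __m256 vararg passed in memory)
// has overflow_arg_area rounded up to a 32-byte boundary before the fetch.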
3556 CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
3557 if (Align > CharUnits::fromQuantity(8)) {
3558 overflow_arg_area = emitRoundPointerUpToAlignment(CGF, overflow_arg_area,
3559 Align);
3560 }
3562 // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
3563 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
3564 llvm::Value *Res =
3565 CGF.Builder.CreateBitCast(overflow_arg_area,
3566 llvm::PointerType::getUnqual(LTy));
3568 // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
3569 // l->overflow_arg_area + sizeof(type).
3570 // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
3571 // an 8 byte boundary.
3573 uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
3574 llvm::Value *Offset =
3575 llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
3576 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
3577 "overflow_arg_area.next");
3578 CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
3580 // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
3581 return Address(Res, Align);
3582 }
3584 Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
3585 QualType Ty) const {
3586 // Assume that va_list type is correct; should be pointer to LLVM type:
3587 // struct {
3588 // i32 gp_offset;
3589 // i32 fp_offset;
3590 // i8* overflow_arg_area;
3591 // i8* reg_save_area;
3592 // };
3593 unsigned neededInt, neededSSE;
3595 Ty = getContext().getCanonicalType(Ty);
3596 ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE,
3597 /*isNamedArg*/false);
3599 // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
3600 // in the registers. If not go to step 7.
3601 if (!neededInt && !neededSSE)
3602 return EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);
3604 // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
3605 // general purpose registers needed to pass type and num_fp to hold
3606 // the number of floating point registers needed.
3608 // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
3609 // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
3610 // l->fp_offset > 304 - num_fp * 16 go to step 7.
3612 // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of
3613 // register save space).
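// Concretely: the save area holds 6 GPRs * 8 bytes = 48 bytes followed by
// 8 XMM registers * 16 bytes = 128 bytes, so gp_offset ranges over
// [0, 48] and fp_offset over [48, 176].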
3615 llvm::Value *InRegs = nullptr;
3616 Address gp_offset_p = Address::invalid(), fp_offset_p = Address::invalid();
3617 llvm::Value *gp_offset = nullptr, *fp_offset = nullptr;
3618 if (neededInt) {
3619 gp_offset_p =
3620 CGF.Builder.CreateStructGEP(VAListAddr, 0, CharUnits::Zero(),
3621 "gp_offset_p");
3622 gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
3623 InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
3624 InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
3625 }
3627 if (neededSSE) {
3628 fp_offset_p =
3629 CGF.Builder.CreateStructGEP(VAListAddr, 1, CharUnits::fromQuantity(4),
3630 "fp_offset_p");
3631 fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
3632 llvm::Value *FitsInFP =
3633 llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
3634 FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
3635 InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
3636 }
3638 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
3639 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
3640 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
3641 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
3643 // Emit code to load the value if it was passed in registers.
3645 CGF.EmitBlock(InRegBlock);
3647 // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
3648 // an offset of l->gp_offset and/or l->fp_offset. This may require
3649 // copying to a temporary location in case the parameter is passed
3650 // in different register classes or requires an alignment greater
3651 // than 8 for general purpose registers and 16 for XMM registers.
3653 // FIXME: This really results in shameful code when we end up needing to
3654 // collect arguments from different places; often what should result in a
3655 // simple assembling of a structure from scattered addresses has many more
3656 // loads than necessary. Can we clean this up?
3657 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
3658 llvm::Value *RegSaveArea = CGF.Builder.CreateLoad(
3659 CGF.Builder.CreateStructGEP(VAListAddr, 3, CharUnits::fromQuantity(16)),
3660 "reg_save_area");
3662 Address RegAddr = Address::invalid();
3663 if (neededInt && neededSSE) {
3665 assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
3666 llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
3667 Address Tmp = CGF.CreateMemTemp(Ty);
3668 Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST);
3669 assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
3670 llvm::Type *TyLo = ST->getElementType(0);
3671 llvm::Type *TyHi = ST->getElementType(1);
3672 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
3673 "Unexpected ABI info for mixed regs");
3674 llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
3675 llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
3676 llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegSaveArea, gp_offset);
3677 llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegSaveArea, fp_offset);
3678 llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
3679 llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
3681 // Copy the first element.
3682 // FIXME: Our choice of alignment here and below is probably pessimistic.
3683 llvm::Value *V = CGF.Builder.CreateAlignedLoad(
3684 TyLo, CGF.Builder.CreateBitCast(RegLoAddr, PTyLo),
3685 CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(TyLo)));
3686 CGF.Builder.CreateStore(V,
3687 CGF.Builder.CreateStructGEP(Tmp, 0, CharUnits::Zero()));
3689 // Copy the second element.
3690 V = CGF.Builder.CreateAlignedLoad(
3691 TyHi, CGF.Builder.CreateBitCast(RegHiAddr, PTyHi),
3692 CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(TyHi)));
3693 CharUnits Offset = CharUnits::fromQuantity(
3694 getDataLayout().getStructLayout(ST)->getElementOffset(1));
3695 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1, Offset));
3697 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
3698 } else if (neededInt) {
3699 RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, gp_offset),
3700 CharUnits::fromQuantity(8));
3701 RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);
3703 // Copy to a temporary if necessary to ensure the appropriate alignment.
3704 std::pair<CharUnits, CharUnits> SizeAlign =
3705 getContext().getTypeInfoInChars(Ty);
3706 uint64_t TySize = SizeAlign.first.getQuantity();
3707 CharUnits TyAlign = SizeAlign.second;
3709 // Copy into a temporary if the type is more aligned than the
3710 // register save area.
3711 if (TyAlign.getQuantity() > 8) {
3712 Address Tmp = CGF.CreateMemTemp(Ty);
3713 CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, false);
3714 RegAddr = Tmp;
3715 }
3717 } else if (neededSSE == 1) {
3718 RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset),
3719 CharUnits::fromQuantity(16));
3720 RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);
3721 } else {
3722 assert(neededSSE == 2 && "Invalid number of needed registers!");
3723 // SSE registers are spaced 16 bytes apart in the register save
3724 // area, we need to collect the two eightbytes together.
3725 // The ABI isn't explicit about this, but it seems reasonable
3726 // to assume that the slots are 16-byte aligned, since the stack is
3727 // naturally 16-byte aligned and the prologue is expected to store
3728 // all the SSE registers to the RSA.
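// E.g. a struct { double x, y; } passed in %xmm0/%xmm1 has x saved at
// reg_save_area + fp_offset and y at reg_save_area + fp_offset + 16.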
3729 Address RegAddrLo = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset),
3730 CharUnits::fromQuantity(16));
3731 Address RegAddrHi =
3732 CGF.Builder.CreateConstInBoundsByteGEP(RegAddrLo,
3733 CharUnits::fromQuantity(16));
3734 llvm::Type *DoubleTy = CGF.DoubleTy;
3735 llvm::StructType *ST = llvm::StructType::get(DoubleTy, DoubleTy, nullptr);
3736 llvm::Value *V;
3737 Address Tmp = CGF.CreateMemTemp(Ty);
3738 Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST);
3739 V = CGF.Builder.CreateLoad(
3740 CGF.Builder.CreateElementBitCast(RegAddrLo, DoubleTy));
3741 CGF.Builder.CreateStore(V,
3742 CGF.Builder.CreateStructGEP(Tmp, 0, CharUnits::Zero()));
3743 V = CGF.Builder.CreateLoad(
3744 CGF.Builder.CreateElementBitCast(RegAddrHi, DoubleTy));
3745 CGF.Builder.CreateStore(V,
3746 CGF.Builder.CreateStructGEP(Tmp, 1, CharUnits::fromQuantity(8)));
3748 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
3749 }
3751 // AMD64-ABI 3.5.7p5: Step 5. Set:
3752 // l->gp_offset = l->gp_offset + num_gp * 8
3753 // l->fp_offset = l->fp_offset + num_fp * 16.
3754 if (neededInt) {
3755 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
3756 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
3757 gp_offset_p);
3758 }
3759 if (neededSSE) {
3760 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
3761 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
3762 fp_offset_p);
3763 }
3764 CGF.EmitBranch(ContBlock);
3766 // Emit code to load the value if it was passed in memory.
3768 CGF.EmitBlock(InMemBlock);
3769 Address MemAddr = EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);
3771 // Return the appropriate result.
3773 CGF.EmitBlock(ContBlock);
3774 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, MemAddr, InMemBlock,
3775 "vaarg.addr");
3776 return ResAddr;
3777 }
3779 Address X86_64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
3780 QualType Ty) const {
3781 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
3782 CGF.getContext().getTypeInfoInChars(Ty),
3783 CharUnits::fromQuantity(8),
3784 /*allowHigherAlign*/ false);
3785 }
3787 ABIArgInfo
3788 WinX86_64ABIInfo::reclassifyHvaArgType(QualType Ty, unsigned &FreeSSERegs,
3789 const ABIArgInfo &current) const {
3790 // Assumes vectorCall calling convention.
3791 const Type *Base = nullptr;
3792 uint64_t NumElts = 0;
3794 if (!Ty->isBuiltinType() && !Ty->isVectorType() &&
3795 isHomogeneousAggregate(Ty, Base, NumElts) && FreeSSERegs >= NumElts) {
3796 FreeSSERegs -= NumElts;
3797 return getDirectX86Hva();
3798 }
3800 return current;
3801 }
3802 ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
3803 bool IsReturnType, bool IsVectorCall,
3804 bool IsRegCall) const {
3806 if (Ty->isVoidType())
3807 return ABIArgInfo::getIgnore();
3809 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3810 Ty = EnumTy->getDecl()->getIntegerType();
3812 TypeInfo Info = getContext().getTypeInfo(Ty);
3813 uint64_t Width = Info.Width;
3814 CharUnits Align = getContext().toCharUnitsFromBits(Info.Align);
3816 const RecordType *RT = Ty->getAs<RecordType>();
3817 if (RT) {
3818 if (!IsReturnType) {
3819 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()))
3820 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
3821 }
3823 if (RT->getDecl()->hasFlexibleArrayMember())
3824 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
3825 }
3828 const Type *Base = nullptr;
3829 uint64_t NumElts = 0;
3830 // vectorcall adds the concept of a homogeneous vector aggregate, similar to
3831 // other targets.
3832 if ((IsVectorCall || IsRegCall) &&
3833 isHomogeneousAggregate(Ty, Base, NumElts)) {
3834 if (IsRegCall) {
3835 if (FreeSSERegs >= NumElts) {
3836 FreeSSERegs -= NumElts;
3837 if (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())
3838 return ABIArgInfo::getDirect();
3839 return ABIArgInfo::getExpand();
3840 }
3841 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
3842 } else if (IsVectorCall) {
3843 if (FreeSSERegs >= NumElts &&
3844 (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())) {
3845 FreeSSERegs -= NumElts;
3846 return ABIArgInfo::getDirect();
3847 } else if (IsReturnType) {
3848 return ABIArgInfo::getExpand();
3849 } else if (!Ty->isBuiltinType() && !Ty->isVectorType()) {
3850 // HVAs are delayed and reclassified in the 2nd step.
3851 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
3852 }
3853 }
3854 }
3856 if (Ty->isMemberPointerType()) {
3857 // If the member pointer is represented by an LLVM int or ptr, pass it
3858 // directly.
3859 llvm::Type *LLTy = CGT.ConvertType(Ty);
3860 if (LLTy->isPointerTy() || LLTy->isIntegerTy())
3861 return ABIArgInfo::getDirect();
3862 }
3864 if (RT || Ty->isAnyComplexType() || Ty->isMemberPointerType()) {
3865 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
3866 // not 1, 2, 4, or 8 bytes, must be passed by reference."
3867 if (Width > 64 || !llvm::isPowerOf2_64(Width))
3868 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
3870 // Otherwise, coerce it to a small integer.
3871 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width));
3872 }
3874 // Bool type is always extended to the ABI, other builtin types are not
3875 // extended.
3876 const BuiltinType *BT = Ty->getAs<BuiltinType>();
3877 if (BT && BT->getKind() == BuiltinType::Bool)
3878 return ABIArgInfo::getExtend();
3880 // Mingw64 GCC uses the old 80 bit extended precision floating point unit. It
3881 // passes them indirectly through memory.
3882 if (IsMingw64 && BT && BT->getKind() == BuiltinType::LongDouble) {
3883 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
3884 if (LDF == &llvm::APFloat::x87DoubleExtended())
3885 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
3886 }
3888 return ABIArgInfo::getDirect();
3889 }
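// E.g. under vectorcall a struct { float x, y, z, w; } is a homogeneous
// vector aggregate of four floats: it ends up in XMM registers when four
// are free, and is passed indirectly otherwise.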
3891 void WinX86_64ABIInfo::computeVectorCallArgs(CGFunctionInfo &FI,
3892 unsigned FreeSSERegs,
3893 bool IsVectorCall,
3894 bool IsRegCall) const {
3895 unsigned Count = 0;
3896 for (auto &I : FI.arguments()) {
3897 if (Count < VectorcallMaxParamNumAsReg)
3898 I.info = classify(I.type, FreeSSERegs, false, IsVectorCall, IsRegCall);
3899 else {
3900 // Since these cannot be passed in registers, pretend no registers
3901 // were available.
3902 unsigned ZeroSSERegsAvail = 0;
3903 I.info = classify(I.type, /*FreeSSERegs=*/ZeroSSERegsAvail, false,
3904 IsVectorCall, IsRegCall);
3905 }
3906 ++Count;
3907 }
3909 Count = 0;
3910 for (auto &I : FI.arguments()) {
3911 if (Count < VectorcallMaxParamNumAsReg)
3912 I.info = reclassifyHvaArgType(I.type, FreeSSERegs, I.info);
3913 ++Count;
3914 }
3915 }
3917 void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
3918 bool IsVectorCall =
3919 FI.getCallingConvention() == llvm::CallingConv::X86_VectorCall;
3920 bool IsRegCall = FI.getCallingConvention() == llvm::CallingConv::X86_RegCall;
3922 unsigned FreeSSERegs = 0;
3923 if (IsVectorCall) {
3924 // We can use up to 4 SSE return registers with vectorcall.
3925 FreeSSERegs = 4;
3926 } else if (IsRegCall) {
3927 // RegCall gives us 16 SSE registers.
3928 FreeSSERegs = 16;
3929 }
3931 if (!getCXXABI().classifyReturnType(FI))
3932 FI.getReturnInfo() = classify(FI.getReturnType(), FreeSSERegs, true,
3933 IsVectorCall, IsRegCall);
3935 if (IsVectorCall) {
3936 // We can use up to 6 SSE register parameters with vectorcall.
3937 FreeSSERegs = 6;
3938 } else if (IsRegCall) {
3939 // RegCall gives us 16 SSE registers, we can reuse the return registers.
3940 FreeSSERegs = 16;
3941 }
3943 if (IsVectorCall) {
3944 computeVectorCallArgs(FI, FreeSSERegs, IsVectorCall, IsRegCall);
3945 } else {
3946 for (auto &I : FI.arguments())
3947 I.info = classify(I.type, FreeSSERegs, false, IsVectorCall, IsRegCall);
3948 }
3949 }
3952 Address WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
3953 QualType Ty) const {
3955 bool IsIndirect = false;
3957 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
3958 // not 1, 2, 4, or 8 bytes, must be passed by reference."
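// E.g. a 12-byte struct (neither 1, 2, 4, nor 8 bytes) goes by reference,
// while an 8-byte struct travels directly in a single register slot.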
3959 if (isAggregateTypeForABI(Ty) || Ty->isMemberPointerType()) {
3960 uint64_t Width = getContext().getTypeSize(Ty);
3961 IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
3962 }
3964 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
3965 CGF.getContext().getTypeInfoInChars(Ty),
3966 CharUnits::fromQuantity(8),
3967 /*allowHigherAlign*/ false);
3968 }
3970 // PowerPC-32
3971 namespace {
3972 /// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information.
3973 class PPC32_SVR4_ABIInfo : public DefaultABIInfo {
3974 bool IsSoftFloatABI;
3975 public:
3976 PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, bool SoftFloatABI)
3977 : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI) {}
3979 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
3980 QualType Ty) const override;
3981 };
3983 class PPC32TargetCodeGenInfo : public TargetCodeGenInfo {
3984 public:
3985 PPC32TargetCodeGenInfo(CodeGenTypes &CGT, bool SoftFloatABI)
3986 : TargetCodeGenInfo(new PPC32_SVR4_ABIInfo(CGT, SoftFloatABI)) {}
3988 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
3989 // This is recovered from gcc output.
3990 return 1; // r1 is the dedicated stack pointer
3991 }
3993 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3994 llvm::Value *Address) const override;
3995 };
3996 }
3999 // TODO: this implementation is now likely redundant with
4000 // DefaultABIInfo::EmitVAArg.
4001 Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
4002 QualType Ty) const {
4003 const unsigned OverflowLimit = 8;
4004 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
4005 // TODO: Implement this. For now ignore.
4006 (void)CTy;
4007 return Address::invalid(); // FIXME?
4008 }
4010 // struct __va_list_tag {
4011 // unsigned char gpr;
4012 // unsigned char fpr;
4013 // unsigned short reserved;
4014 // void *overflow_arg_area;
4015 // void *reg_save_area;
4016 // };
4018 bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64;
4019 bool isInt =
4020 Ty->isIntegerType() || Ty->isPointerType() || Ty->isAggregateType();
4021 bool isF64 = Ty->isFloatingType() && getContext().getTypeSize(Ty) == 64;
4023 // All aggregates are passed indirectly? That doesn't seem consistent
4024 // with the argument-lowering code.
4025 bool isIndirect = Ty->isAggregateType();
4027 CGBuilderTy &Builder = CGF.Builder;
4029 // The calling convention either uses 1-2 GPRs or 1 FPR.
4030 Address NumRegsAddr = Address::invalid();
4031 if (isInt || IsSoftFloatABI) {
4032 NumRegsAddr = Builder.CreateStructGEP(VAList, 0, CharUnits::Zero(), "gpr");
4033 } else {
4034 NumRegsAddr = Builder.CreateStructGEP(VAList, 1, CharUnits::One(), "fpr");
4035 }
4037 llvm::Value *NumRegs = Builder.CreateLoad(NumRegsAddr, "numUsedRegs");
4039 // "Align" the register count when TY is i64.
4040 if (isI64 || (isF64 && IsSoftFloatABI)) {
4041 NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(1));
4042 NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t) ~1U));
4043 }
4045 llvm::Value *CC =
4046 Builder.CreateICmpULT(NumRegs, Builder.getInt8(OverflowLimit), "cond");
4048 llvm::BasicBlock *UsingRegs = CGF.createBasicBlock("using_regs");
4049 llvm::BasicBlock *UsingOverflow = CGF.createBasicBlock("using_overflow");
4050 llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
4052 Builder.CreateCondBr(CC, UsingRegs, UsingOverflow);
4054 llvm::Type *DirectTy = CGF.ConvertType(Ty);
4055 if (isIndirect) DirectTy = DirectTy->getPointerTo(0);
4057 // Case 1: consume registers.
4058 Address RegAddr = Address::invalid();
4059 {
4060 CGF.EmitBlock(UsingRegs);
4062 Address RegSaveAreaPtr =
4063 Builder.CreateStructGEP(VAList, 4, CharUnits::fromQuantity(8));
4064 RegAddr = Address(Builder.CreateLoad(RegSaveAreaPtr),
4065 CharUnits::fromQuantity(8));
4066 assert(RegAddr.getElementType() == CGF.Int8Ty);
4068 // Floating-point registers start after the general-purpose registers.
4069 if (!(isInt || IsSoftFloatABI)) {
4070 RegAddr = Builder.CreateConstInBoundsByteGEP(RegAddr,
4071 CharUnits::fromQuantity(32));
4072 }
4074 // Get the address of the saved value by scaling the number of
4075 // registers we've used by the register size.
4076 CharUnits RegSize = CharUnits::fromQuantity((isInt || IsSoftFloatABI) ? 4 : 8);
4077 llvm::Value *RegOffset =
4078 Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.getQuantity()));
4079 RegAddr = Address(Builder.CreateInBoundsGEP(CGF.Int8Ty,
4080 RegAddr.getPointer(), RegOffset),
4081 RegAddr.getAlignment().alignmentOfArrayElement(RegSize));
4082 RegAddr = Builder.CreateElementBitCast(RegAddr, DirectTy);
4084 // Increase the used-register count.
4085 NumRegs =
4086 Builder.CreateAdd(NumRegs,
4087 Builder.getInt8((isI64 || (isF64 && IsSoftFloatABI)) ? 2 : 1));
4088 Builder.CreateStore(NumRegs, NumRegsAddr);
4090 CGF.EmitBranch(Cont);
4091 }
4093 // Case 2: consume space in the overflow area.
4094 Address MemAddr = Address::invalid();
4095 {
4096 CGF.EmitBlock(UsingOverflow);
4098 Builder.CreateStore(Builder.getInt8(OverflowLimit), NumRegsAddr);
4100 // Everything in the overflow area is rounded up to a size of at least 4.
4101 CharUnits OverflowAreaAlign = CharUnits::fromQuantity(4);
4103 CharUnits Size;
4104 if (!isIndirect) {
4105 auto TypeInfo = CGF.getContext().getTypeInfoInChars(Ty);
4106 Size = TypeInfo.first.alignTo(OverflowAreaAlign);
4107 } else {
4108 Size = CGF.getPointerSize();
4109 }
4111 Address OverflowAreaAddr =
4112 Builder.CreateStructGEP(VAList, 3, CharUnits::fromQuantity(4));
4113 Address OverflowArea(Builder.CreateLoad(OverflowAreaAddr, "argp.cur"),
4114 OverflowAreaAlign);
4115 // Round up address of argument to alignment
4116 CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
4117 if (Align > OverflowAreaAlign) {
4118 llvm::Value *Ptr = OverflowArea.getPointer();
4119 OverflowArea = Address(emitRoundPointerUpToAlignment(CGF, Ptr, Align),
4120 Align);
4121 }
4123 MemAddr = Builder.CreateElementBitCast(OverflowArea, DirectTy);
4125 // Increase the overflow area.
4126 OverflowArea = Builder.CreateConstInBoundsByteGEP(OverflowArea, Size);
4127 Builder.CreateStore(OverflowArea.getPointer(), OverflowAreaAddr);
4128 CGF.EmitBranch(Cont);
4129 }
4131 CGF.EmitBlock(Cont);
4133 // Merge the cases with a phi.
4134 Address Result = emitMergePHI(CGF, RegAddr, UsingRegs, MemAddr, UsingOverflow,
4135 "vaarg.addr");
4137 // Load the pointer if the argument was passed indirectly.
4138 if (isIndirect)
4139 Result = Address(Builder.CreateLoad(Result, "aggr"),
4140 getContext().getTypeAlignInChars(Ty));
4142 return Result;
4143 }
4146 bool
4147 PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4148 llvm::Value *Address) const {
4149 // This is calculated from the LLVM and GCC tables and verified
4150 // against gcc output. AFAIK all ABIs use the same encoding.
4152 CodeGen::CGBuilderTy &Builder = CGF.Builder;
4154 llvm::IntegerType *i8 = CGF.Int8Ty;
4155 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
4156 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
4157 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
4159 // 0-31: r0-31, the 4-byte general-purpose registers
4160 AssignToArrayRange(Builder, Address, Four8, 0, 31);
4162 // 32-63: fp0-31, the 8-byte floating-point registers
4163 AssignToArrayRange(Builder, Address, Eight8, 32, 63);
4165 // 64-76 are various 4-byte special-purpose registers:
4166 // 64: mq
4167 // 65: lr
4168 // 66: ctr
4169 // 67: ap
4170 // 68-75 cr0-7
4171 // 76: xer
4172 AssignToArrayRange(Builder, Address, Four8, 64, 76);
4174 // 77-108: v0-31, the 16-byte vector registers
4175 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
4176 // 109: vrsave
4177 // 110: vscr
4178 // 111: spe_acc
4179 // 112: spefscr
4180 // 113: sfp
4182 AssignToArrayRange(Builder, Address, Four8, 109, 113);
4184 return false;
4185 }
4187 // PowerPC-64
4188 namespace {
4190 /// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
4191 class PPC64_SVR4_ABIInfo : public ABIInfo {
4192 public:
4193 enum ABIKind {
4194 ELFv1 = 0,
4195 ELFv2
4196 };
4198 private:
4199 static const unsigned GPRBits = 64;
4200 ABIKind Kind;
4201 bool HasQPX;
4202 bool IsSoftFloatABI;
4204 // A vector of float or double will be promoted to <4 x f32> or <4 x f64> and
4205 // will be passed in a QPX register.
4206 bool IsQPXVectorTy(const Type *Ty) const {
4207 if (!HasQPX)
4208 return false;
4210 if (const VectorType *VT = Ty->getAs<VectorType>()) {
4211 unsigned NumElements = VT->getNumElements();
4212 if (NumElements == 1)
4213 return false;
4215 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) {
4216 if (getContext().getTypeSize(Ty) <= 256)
4217 return true;
4218 } else if (VT->getElementType()->
4219 isSpecificBuiltinType(BuiltinType::Float)) {
4220 if (getContext().getTypeSize(Ty) <= 128)
4221 return true;
4222 }
4223 }
4225 return false;
4226 }
4228 bool IsQPXVectorTy(QualType Ty) const {
4229 return IsQPXVectorTy(Ty.getTypePtr());
4230 }
4232 public:
4233 PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind, bool HasQPX,
4234 bool SoftFloatABI)
4235 : ABIInfo(CGT), Kind(Kind), HasQPX(HasQPX),
4236 IsSoftFloatABI(SoftFloatABI) {}
4238 bool isPromotableTypeForABI(QualType Ty) const;
4239 CharUnits getParamTypeAlignment(QualType Ty) const;
4241 ABIArgInfo classifyReturnType(QualType RetTy) const;
4242 ABIArgInfo classifyArgumentType(QualType Ty) const;
4244 bool isHomogeneousAggregateBaseType(QualType Ty) const override;
4245 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
4246 uint64_t Members) const override;
4248 // TODO: We can add more logic to computeInfo to improve performance.
4249 // Example: For aggregate arguments that fit in a register, we could
4250 // use getDirectInReg (as is done below for structs containing a single
4251 // floating-point value) to avoid pushing them to memory on function
4252 // entry. This would require changing the logic in PPCISelLowering
4253 // when lowering the parameters in the caller and args in the callee.
4254 void computeInfo(CGFunctionInfo &FI) const override {
4255 if (!getCXXABI().classifyReturnType(FI))
4256 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
4257 for (auto &I : FI.arguments()) {
4258 // We rely on the default argument classification for the most part.
4259 // One exception: An aggregate containing a single floating-point
4260 // or vector item must be passed in a register if one is available.
4261 const Type *T = isSingleElementStruct(I.type, getContext());
4262 if (T) {
4263 const BuiltinType *BT = T->getAs<BuiltinType>();
4264 if (IsQPXVectorTy(T) ||
4265 (T->isVectorType() && getContext().getTypeSize(T) == 128) ||
4266 (BT && BT->isFloatingPoint())) {
4267 QualType QT(T, 0);
4268 I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
4269 continue;
4270 }
4271 }
4272 I.info = classifyArgumentType(I.type);
4273 }
4274 }
4276 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4277 QualType Ty) const override;
4278 };
4280 class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {
4282 public:
4283 PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT,
4284 PPC64_SVR4_ABIInfo::ABIKind Kind, bool HasQPX,
4285 bool SoftFloatABI)
4286 : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT, Kind, HasQPX,
4287 SoftFloatABI)) {}
4289 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
4290 // This is recovered from gcc output.
4291 return 1; // r1 is the dedicated stack pointer
4292 }
4294 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4295 llvm::Value *Address) const override;
4296 };
4298 class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
4299 public:
4300 PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
4302 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
4303 // This is recovered from gcc output.
4304 return 1; // r1 is the dedicated stack pointer
4305 }
4307 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4308 llvm::Value *Address) const override;
4309 };
4310 }
4313 // Return true if the ABI requires Ty to be passed sign- or zero-
4314 // extended to 64 bits.
4315 bool
4316 PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
4317 // Treat an enum type as its underlying type.
4318 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
4319 Ty = EnumTy->getDecl()->getIntegerType();
4321 // Promotable integer types are required to be promoted by the ABI.
4322 if (Ty->isPromotableIntegerType())
4323 return true;
4325 // In addition to the usual promotable integer types, we also need to
4326 // extend all 32-bit types, since the ABI requires promotion to 64 bits.
4327 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
4328 switch (BT->getKind()) {
4329 case BuiltinType::Int:
4330 case BuiltinType::UInt:
4331 return true;
4332 default:
4333 break;
4334 }
4336 return false;
4337 }
4339 /// isAlignedParamType - Determine whether a type requires 16-byte or
4340 /// higher alignment in the parameter area. Always returns at least 8.
4341 CharUnits PPC64_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
4342 // Complex types are passed just like their elements.
4343 if (const ComplexType *CTy = Ty->getAs<ComplexType>())
4344 Ty = CTy->getElementType();
4346 // Only vector types of size 16 bytes need alignment (larger types are
4347 // passed via reference, smaller types are not aligned).
4348 if (IsQPXVectorTy(Ty)) {
4349 if (getContext().getTypeSize(Ty) > 128)
4350 return CharUnits::fromQuantity(32);
4352 return CharUnits::fromQuantity(16);
4353 } else if (Ty->isVectorType()) {
4354 return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16 : 8);
4357 // For single-element float/vector structs, we consider the whole type
4358 // to have the same alignment requirements as its single element.
4359 const Type *AlignAsType = nullptr;
4360 const Type *EltType = isSingleElementStruct(Ty, getContext());
4361 if (EltType) {
4362 const BuiltinType *BT = EltType->getAs<BuiltinType>();
4363 if (IsQPXVectorTy(EltType) || (EltType->isVectorType() &&
4364 getContext().getTypeSize(EltType) == 128) ||
4365 (BT && BT->isFloatingPoint()))
4366 AlignAsType = EltType;
4367 }
4369 // Likewise for ELFv2 homogeneous aggregates.
4370 const Type *Base = nullptr;
4371 uint64_t Members = 0;
4372 if (!AlignAsType && Kind == ELFv2 &&
4373 isAggregateTypeForABI(Ty) && isHomogeneousAggregate(Ty, Base, Members))
4374 AlignAsType = Base;
4376 // With special case aggregates, only vector base types need alignment.
4377 if (AlignAsType && IsQPXVectorTy(AlignAsType)) {
4378 if (getContext().getTypeSize(AlignAsType) > 128)
4379 return CharUnits::fromQuantity(32);
4381 return CharUnits::fromQuantity(16);
4382 } else if (AlignAsType) {
4383 return CharUnits::fromQuantity(AlignAsType->isVectorType() ? 16 : 8);
4386 // Otherwise, we only need alignment for any aggregate type that
4387 // has an alignment requirement of >= 16 bytes.
4388 if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128) {
4389 if (HasQPX && getContext().getTypeAlign(Ty) >= 256)
4390 return CharUnits::fromQuantity(32);
4391 return CharUnits::fromQuantity(16);
4394 return CharUnits::fromQuantity(8);
4395 }
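// E.g. an aggregate whose members force alignof(T) >= 16 lands on a
// 16-byte boundary in the parameter save area, while everything else
// stays at the default 8-byte doubleword alignment.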
4397 /// isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous
4398 /// aggregate. Base is set to the base element type, and Members is set
4399 /// to the number of base elements.
4400 bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base,
4401 uint64_t &Members) const {
4402 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
4403 uint64_t NElements = AT->getSize().getZExtValue();
4404 if (NElements == 0)
4405 return false;
4406 if (!isHomogeneousAggregate(AT->getElementType(), Base, Members))
4407 return false;
4408 Members *= NElements;
4409 } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
4410 const RecordDecl *RD = RT->getDecl();
4411 if (RD->hasFlexibleArrayMember())
4412 return false;
4414 Members = 0;
4416 // If this is a C++ record, check the bases first.
4417 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
4418 for (const auto &I : CXXRD->bases()) {
4419 // Ignore empty records.
4420 if (isEmptyRecord(getContext(), I.getType(), true))
4421 continue;
4423 uint64_t FldMembers;
4424 if (!isHomogeneousAggregate(I.getType(), Base, FldMembers))
4425 return false;
4427 Members += FldMembers;
4428 }
4429 }
4431 for (const auto *FD : RD->fields()) {
4432 // Ignore (non-zero arrays of) empty records.
4433 QualType FT = FD->getType();
4434 while (const ConstantArrayType *AT =
4435 getContext().getAsConstantArrayType(FT)) {
4436 if (AT->getSize().getZExtValue() == 0)
4437 return false;
4438 FT = AT->getElementType();
4439 }
4440 if (isEmptyRecord(getContext(), FT, true))
4441 continue;
4443 // For compatibility with GCC, ignore empty bitfields in C++ mode.
4444 if (getContext().getLangOpts().CPlusPlus &&
4445 FD->isBitField() && FD->getBitWidthValue(getContext()) == 0)
4446 continue;
4448 uint64_t FldMembers;
4449 if (!isHomogeneousAggregate(FD->getType(), Base, FldMembers))
4450 return false;
4452 Members = (RD->isUnion() ?
4453 std::max(Members, FldMembers) : Members + FldMembers);
4454 }
4459 // Ensure there is no padding.
4460 if (getContext().getTypeSize(Base) * Members !=
4461 getContext().getTypeSize(Ty))
4462 return false;
4463 } else {
4464 Members = 1;
4465 if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
4466 Members = 2;
4467 Ty = CT->getElementType();
4468 }
4470 // Most ABIs only support float, double, and some vector type widths.
4471 if (!isHomogeneousAggregateBaseType(Ty))
4472 return false;
4473 }
4474 // The base type must be the same for all members. Types that
4475 // agree in both total size and mode (float vs. vector) are
4476 // treated as being equivalent here.
4477 const Type *TyPtr = Ty.getTypePtr();
4478 if (!Base) {
4479 Base = TyPtr;
4480 // If it's a non-power-of-2 vector, its size is already a power-of-2,
4481 // so make sure to widen it explicitly.
4482 if (const VectorType *VT = Base->getAs<VectorType>()) {
4483 QualType EltTy = VT->getElementType();
4484 unsigned NumElements =
4485 getContext().getTypeSize(VT) / getContext().getTypeSize(EltTy);
4486 Base = getContext()
4487 .getVectorType(EltTy, NumElements, VT->getVectorKind())
4488 .getTypePtr();
4489 }
4490 }
4492 if (Base->isVectorType() != TyPtr->isVectorType() ||
4493 getContext().getTypeSize(Base) != getContext().getTypeSize(TyPtr))
4494 return false;
4496 return Members > 0 && isHomogeneousAggregateSmallEnough(Base, Members);
4497 }
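// E.g. struct { float x, y, z; } is homogeneous with Base = float and
// Members = 3, while adding an int member (or trailing padding)
// disqualifies the type.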
4499 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
4500 // Homogeneous aggregates for ELFv2 must have base types of float,
4501 // double, long double, or 128-bit vectors.
4502 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
4503 if (BT->getKind() == BuiltinType::Float ||
4504 BT->getKind() == BuiltinType::Double ||
4505 BT->getKind() == BuiltinType::LongDouble) {
4506 if (IsSoftFloatABI)
4507 return false;
4508 return true;
4509 }
4510 }
4511 if (const VectorType *VT = Ty->getAs<VectorType>()) {
4512 if (getContext().getTypeSize(VT) == 128 || IsQPXVectorTy(Ty))
4513 return true;
4514 }
4515 return false;
4516 }
4518 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough(
4519 const Type *Base, uint64_t Members) const {
4520 // Vector types require one register, floating point types require one
4521 // or two registers depending on their size.
4522 uint32_t NumRegs =
4523 Base->isVectorType() ? 1 : (getContext().getTypeSize(Base) + 63) / 64;
4525 // Homogeneous Aggregates may occupy at most 8 registers.
4526 return Members * NumRegs <= 8;
4527 }
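// E.g. float and double bases take one register each, so up to eight
// members fit; a 128-bit long double base takes two registers, capping
// the aggregate at four members.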
4529 ABIArgInfo
4530 PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
4531 Ty = useFirstFieldIfTransparentUnion(Ty);
4533 if (Ty->isAnyComplexType())
4534 return ABIArgInfo::getDirect();
4536 // Non-Altivec vector types are passed in GPRs (smaller than 16 bytes)
4537 // or via reference (larger than 16 bytes).
4538 if (Ty->isVectorType() && !IsQPXVectorTy(Ty)) {
4539 uint64_t Size = getContext().getTypeSize(Ty);
4540 if (Size > 128)
4541 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
4542 else if (Size < 128) {
4543 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
4544 return ABIArgInfo::getDirect(CoerceTy);
4545 }
4546 }
4548 if (isAggregateTypeForABI(Ty)) {
4549 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
4550 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
4552 uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity();
4553 uint64_t TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity();
4555 // ELFv2 homogeneous aggregates are passed as array types.
4556 const Type *Base = nullptr;
4557 uint64_t Members = 0;
4558 if (Kind == ELFv2 &&
4559 isHomogeneousAggregate(Ty, Base, Members)) {
4560 llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
4561 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
4562 return ABIArgInfo::getDirect(CoerceTy);
4565 // If an aggregate may end up fully in registers, we do not
4566 // use the ByVal method, but pass the aggregate as array.
4567 // This is usually beneficial since we avoid forcing the
4568 // back-end to store the argument to memory.
4569 uint64_t Bits = getContext().getTypeSize(Ty);
4570 if (Bits > 0 && Bits <= 8 * GPRBits) {
4571 llvm::Type *CoerceTy;
4573 // Types up to 8 bytes are passed as integer type (which will be
4574 // properly aligned in the argument save area doubleword).
4575 if (Bits <= GPRBits)
4576 CoerceTy =
4577 llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
4578 // Larger types are passed as arrays, with the base type selected
4579 // according to the required alignment in the save area.
4580 else {
4581 uint64_t RegBits = ABIAlign * 8;
4582 uint64_t NumRegs = llvm::alignTo(Bits, RegBits) / RegBits;
4583 llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits);
4584 CoerceTy = llvm::ArrayType::get(RegTy, NumRegs);
4585 }
4587 return ABIArgInfo::getDirect(CoerceTy);
4588 }
4590 // All other aggregates are passed ByVal.
4591 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
4592 /*ByVal=*/true,
4593 /*Realign=*/TyAlign > ABIAlign);
4594 }
4596 return (isPromotableTypeForABI(Ty) ?
4597 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
4598 }
4600 ABIArgInfo
4601 PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
4602 if (RetTy->isVoidType())
4603 return ABIArgInfo::getIgnore();
4605 if (RetTy->isAnyComplexType())
4606 return ABIArgInfo::getDirect();
4608 // Non-Altivec vector types are returned in GPRs (smaller than 16 bytes)
4609 // or via reference (larger than 16 bytes).
4610 if (RetTy->isVectorType() && !IsQPXVectorTy(RetTy)) {
4611 uint64_t Size = getContext().getTypeSize(RetTy);
4612 if (Size > 128)
4613 return getNaturalAlignIndirect(RetTy);
4614 else if (Size < 128) {
4615 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
4616 return ABIArgInfo::getDirect(CoerceTy);
4617 }
4618 }
4620 if (isAggregateTypeForABI(RetTy)) {
4621 // ELFv2 homogeneous aggregates are returned as array types.
4622 const Type *Base = nullptr;
4623 uint64_t Members = 0;
4624 if (Kind == ELFv2 &&
4625 isHomogeneousAggregate(RetTy, Base, Members)) {
4626 llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
4627 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
4628 return ABIArgInfo::getDirect(CoerceTy);
4631 // ELFv2 small aggregates are returned in up to two registers.
4632 uint64_t Bits = getContext().getTypeSize(RetTy);
4633 if (Kind == ELFv2 && Bits <= 2 * GPRBits) {
4634 if (Bits == 0)
4635 return ABIArgInfo::getIgnore();
4637 llvm::Type *CoerceTy;
4638 if (Bits > GPRBits) {
4639 CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits);
4640 CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy, nullptr);
4641 } else
4642 CoerceTy =
4643 llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
4644 return ABIArgInfo::getDirect(CoerceTy);
4645 }
4647 // All other aggregates are returned indirectly.
4648 return getNaturalAlignIndirect(RetTy);
4649 }
4651 return (isPromotableTypeForABI(RetTy) ?
4652 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
4653 }
4655 // Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine.
4656 Address PPC64_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4657 QualType Ty) const {
4658 auto TypeInfo = getContext().getTypeInfoInChars(Ty);
4659 TypeInfo.second = getParamTypeAlignment(Ty);
4661 CharUnits SlotSize = CharUnits::fromQuantity(8);
4663 // If we have a complex type and the base type is smaller than 8 bytes,
4664 // the ABI calls for the real and imaginary parts to be right-adjusted
4665 // in separate doublewords. However, Clang expects us to produce a
4666 // pointer to a structure with the two parts packed tightly. So generate
4667 // loads of the real and imaginary parts relative to the va_list pointer,
4668 // and store them to a temporary structure.
4669 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
4670 CharUnits EltSize = TypeInfo.first / 2;
4671 if (EltSize < SlotSize) {
4672 Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, CGF.Int8Ty,
4673 SlotSize * 2, SlotSize,
4674 SlotSize, /*AllowHigher*/ true);
4676 Address RealAddr = Addr;
4677 Address ImagAddr = RealAddr;
4678 if (CGF.CGM.getDataLayout().isBigEndian()) {
4679 RealAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr,
4680 SlotSize - EltSize);
4681 ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(ImagAddr,
4682 2 * SlotSize - EltSize);
4683 } else {
4684 ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, SlotSize);
4685 }
4687 llvm::Type *EltTy = CGF.ConvertTypeForMem(CTy->getElementType());
4688 RealAddr = CGF.Builder.CreateElementBitCast(RealAddr, EltTy);
4689 ImagAddr = CGF.Builder.CreateElementBitCast(ImagAddr, EltTy);
4690 llvm::Value *Real = CGF.Builder.CreateLoad(RealAddr, ".vareal");
4691 llvm::Value *Imag = CGF.Builder.CreateLoad(ImagAddr, ".vaimag");
4693 Address Temp = CGF.CreateMemTemp(Ty, "vacplx");
4694 CGF.EmitStoreOfComplex({Real, Imag}, CGF.MakeAddrLValue(Temp, Ty),
4695 /*isInit*/ true);
4696 return Temp;
4697 }
4700 // Otherwise, just use the general rule.
4701 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
4702 TypeInfo, SlotSize, /*AllowHigher*/ true);
4703 }
4705 static bool
4706 PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4707 llvm::Value *Address) {
4708 // This is calculated from the LLVM and GCC tables and verified
4709 // against gcc output. AFAIK all ABIs use the same encoding.
4711 CodeGen::CGBuilderTy &Builder = CGF.Builder;
4713 llvm::IntegerType *i8 = CGF.Int8Ty;
4714 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
4715 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
4716 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
4718 // 0-31: r0-31, the 8-byte general-purpose registers
4719 AssignToArrayRange(Builder, Address, Eight8, 0, 31);
4721 // 32-63: fp0-31, the 8-byte floating-point registers
4722 AssignToArrayRange(Builder, Address, Eight8, 32, 63);
4724 // 64-67 are various 8-byte special-purpose registers:
4725 // 64: mq
4726 // 65: lr
4727 // 66: ctr
4728 // 67: ap
4729 AssignToArrayRange(Builder, Address, Eight8, 64, 67);
4731 // 68-76 are various 4-byte special-purpose registers:
4732 // 68-75 cr0-7
4733 // 76: xer
4734 AssignToArrayRange(Builder, Address, Four8, 68, 76);
4736 // 77-108: v0-31, the 16-byte vector registers
4737 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
4739 // 109: vrsave
4740 // 110: vscr
4741 // 111: spe_acc
4742 // 112: spefscr
4743 // 113: sfp
4744 // 114: tfhar
4745 // 115: tfiar
4746 // 116: texasr
4747 AssignToArrayRange(Builder, Address, Eight8, 109, 116);
4749 return false;
4750 }
4752 bool
4753 PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
4754 CodeGen::CodeGenFunction &CGF,
4755 llvm::Value *Address) const {
4757 return PPC64_initDwarfEHRegSizeTable(CGF, Address);
4758 }
4760 bool
4761 PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4762 llvm::Value *Address) const {
4764 return PPC64_initDwarfEHRegSizeTable(CGF, Address);
4765 }
4767 //===----------------------------------------------------------------------===//
4768 // AArch64 ABI Implementation
4769 //===----------------------------------------------------------------------===//
4771 namespace {
4773 class AArch64ABIInfo : public SwiftABIInfo {
4774 public:
4775 enum ABIKind {
4776 AAPCS = 0,
4777 DarwinPCS
4778 };
4780 private:
4781 ABIKind Kind;
4783 public:
4784 AArch64ABIInfo(CodeGenTypes &CGT, ABIKind Kind)
4785 : SwiftABIInfo(CGT), Kind(Kind) {}
4787 private:
4788 ABIKind getABIKind() const { return Kind; }
4789 bool isDarwinPCS() const { return Kind == DarwinPCS; }
4791 ABIArgInfo classifyReturnType(QualType RetTy) const;
4792 ABIArgInfo classifyArgumentType(QualType RetTy) const;
4793 bool isHomogeneousAggregateBaseType(QualType Ty) const override;
4794 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
4795 uint64_t Members) const override;
4797 bool isIllegalVectorType(QualType Ty) const;
4799 void computeInfo(CGFunctionInfo &FI) const override {
4800 if (!getCXXABI().classifyReturnType(FI))
4801 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
4803 for (auto &it : FI.arguments())
4804 it.info = classifyArgumentType(it.type);
4807 Address EmitDarwinVAArg(Address VAListAddr, QualType Ty,
4808 CodeGenFunction &CGF) const;
4810 Address EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
4811 CodeGenFunction &CGF) const;
4813 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4814 QualType Ty) const override {
4815 return isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
4816 : EmitAAPCSVAArg(VAListAddr, Ty, CGF);
4819 bool shouldPassIndirectlyForSwift(CharUnits totalSize,
4820 ArrayRef<llvm::Type*> scalars,
4821 bool asReturnValue) const override {
4822 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
4823 }
4824 bool isSwiftErrorInRegister() const override {
4825 return true;
4826 }
4827 };
4829 class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
4830 public:
4831 AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind Kind)
4832 : TargetCodeGenInfo(new AArch64ABIInfo(CGT, Kind)) {}
4834 StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
4835 return "mov\tfp, fp\t\t# marker for objc_retainAutoreleaseReturnValue";
4836 }
4838 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
4839 return 31;
4840 }
4842 bool doesReturnSlotInterfereWithArgs() const override { return false; }
4843 };
4844 }
4846 ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const {
4847 Ty = useFirstFieldIfTransparentUnion(Ty);
4849 // Handle illegal vector types here.
4850 if (isIllegalVectorType(Ty)) {
4851 uint64_t Size = getContext().getTypeSize(Ty);
4852 // Android promotes <2 x i8> to i16, not i32
4853 if (isAndroid() && (Size <= 16)) {
4854 llvm::Type *ResType = llvm::Type::getInt16Ty(getVMContext());
4855 return ABIArgInfo::getDirect(ResType);
4856 }
4857 if (Size <= 32) {
4858 llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
4859 return ABIArgInfo::getDirect(ResType);
4860 }
4861 if (Size == 64) {
4862 llvm::Type *ResType =
4863 llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
4864 return ABIArgInfo::getDirect(ResType);
4865 }
4866 if (Size == 128) {
4867 llvm::Type *ResType =
4868 llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
4869 return ABIArgInfo::getDirect(ResType);
4870 }
4871 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
4872 }
4874 if (!isAggregateTypeForABI(Ty)) {
4875 // Treat an enum type as its underlying type.
4876 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
4877 Ty = EnumTy->getDecl()->getIntegerType();
4879 return (Ty->isPromotableIntegerType() && isDarwinPCS()
4880 ? ABIArgInfo::getExtend()
4881 : ABIArgInfo::getDirect());
4884 // Structures with either a non-trivial destructor or a non-trivial
4885 // copy constructor are always indirect.
4886 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
4887 return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
4888 CGCXXABI::RAA_DirectInMemory);
4891 // Empty records are always ignored on Darwin, but actually passed in C++ mode
4892 // elsewhere for GNU compatibility.
4893 uint64_t Size = getContext().getTypeSize(Ty);
4894 bool IsEmpty = isEmptyRecord(getContext(), Ty, true);
4895 if (IsEmpty || Size == 0) {
4896 if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
4897 return ABIArgInfo::getIgnore();
4899 // GNU C mode. The only argument that gets ignored is an empty one with size
4900 // 0.
4901 if (IsEmpty && Size == 0)
4902 return ABIArgInfo::getIgnore();
4903 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
4904 }
4906 // Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
4907 const Type *Base = nullptr;
4908 uint64_t Members = 0;
4909 if (isHomogeneousAggregate(Ty, Base, Members)) {
4910 return ABIArgInfo::getDirect(
4911 llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members));
4912 }
4914 // Aggregates <= 16 bytes are passed directly in registers or on the stack.
4915 if (Size <= 128) {
4916 // On RenderScript, coerce Aggregates <= 16 bytes to an integer array of
4917 // same size and alignment.
4918 if (getTarget().isRenderScriptTarget()) {
4919 return coerceToIntArray(Ty, getContext(), getVMContext());
4920 }
4921 unsigned Alignment = getContext().getTypeAlign(Ty);
4922 Size = llvm::alignTo(Size, 64); // round up to multiple of 8 bytes
4924 // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
4925 // For aggregates with 16-byte alignment, we use i128.
4926 if (Alignment < 128 && Size == 128) {
4927 llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
4928 return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
4929 }
4930 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
4931 }
4933 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
4934 }
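// E.g. struct { int a, b, c; } (12 bytes, 4-byte aligned) rounds up to
// 128 bits and is passed as [2 x i64] by the classification above; the
// same size with 16-byte alignment would instead be coerced to i128.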
4936 ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy) const {
4937 if (RetTy->isVoidType())
4938 return ABIArgInfo::getIgnore();
4940 // Large vector types should be returned via memory.
4941 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
4942 return getNaturalAlignIndirect(RetTy);
4944 if (!isAggregateTypeForABI(RetTy)) {
4945 // Treat an enum type as its underlying type.
4946 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
4947 RetTy = EnumTy->getDecl()->getIntegerType();
4949 return (RetTy->isPromotableIntegerType() && isDarwinPCS()
4950 ? ABIArgInfo::getExtend()
4951 : ABIArgInfo::getDirect());
4954 uint64_t Size = getContext().getTypeSize(RetTy);
4955 if (isEmptyRecord(getContext(), RetTy, true) || Size == 0)
4956 return ABIArgInfo::getIgnore();
4958 const Type *Base = nullptr;
4959 uint64_t Members = 0;
4960 if (isHomogeneousAggregate(RetTy, Base, Members))
4961 // Homogeneous Floating-point Aggregates (HFAs) are returned directly.
4962 return ABIArgInfo::getDirect();
4964 // Aggregates <= 16 bytes are returned directly in registers or on the stack.
4965 if (Size <= 128) {
4966 // On RenderScript, coerce Aggregates <= 16 bytes to an integer array of
4967 // same size and alignment.
4968 if (getTarget().isRenderScriptTarget()) {
4969 return coerceToIntArray(RetTy, getContext(), getVMContext());
4970 }
4971 unsigned Alignment = getContext().getTypeAlign(RetTy);
4972 Size = llvm::alignTo(Size, 64); // round up to multiple of 8 bytes
4974 // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
4975 // For aggregates with 16-byte alignment, we use i128.
4976 if (Alignment < 128 && Size == 128) {
4977 llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
4978 return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
4979 }
4980 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
4981 }
4983 return getNaturalAlignIndirect(RetTy);
4984 }
4986 /// isIllegalVectorType - check whether the vector type is legal for AArch64.
4987 bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const {
4988 if (const VectorType *VT = Ty->getAs<VectorType>()) {
4989 // Check whether VT is legal.
4990 unsigned NumElements = VT->getNumElements();
4991 uint64_t Size = getContext().getTypeSize(VT);
4992 // NumElements should be power of 2.
4993 if (!llvm::isPowerOf2_32(NumElements))
4994 return true;
4995 return Size != 64 && (Size != 128 || NumElements == 1);
4996 }
4997 return false;
4998 }
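// E.g. <3 x float> has a non-power-of-two element count, so it is
// illegal here; its size is rounded up to 128 bits and the argument is
// coerced to <4 x i32> above.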
5000 bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
5001 // Homogeneous aggregates for AAPCS64 must have base types of a floating
5002 // point type or a short-vector type. This is the same as the 32-bit ABI,
5003 // but with the difference that any floating-point type is allowed,
5004 // including __fp16.
5005 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
5006 if (BT->isFloatingPoint())
5007 return true;
5008 } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
5009 unsigned VecSize = getContext().getTypeSize(VT);
5010 if (VecSize == 64 || VecSize == 128)
5011 return true;
5012 }
5013 return false;
5014 }
5016 bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
5017 uint64_t Members) const {
5018 return Members <= 4;
5019 }
5021 Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr,
5022 QualType Ty,
5023 CodeGenFunction &CGF) const {
5024 ABIArgInfo AI = classifyArgumentType(Ty);
5025 bool IsIndirect = AI.isIndirect();
5027 llvm::Type *BaseTy = CGF.ConvertType(Ty);
5028 if (IsIndirect)
5029 BaseTy = llvm::PointerType::getUnqual(BaseTy);
5030 else if (AI.getCoerceToType())
5031 BaseTy = AI.getCoerceToType();
5033 unsigned NumRegs = 1;
5034 if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
5035 BaseTy = ArrTy->getElementType();
5036 NumRegs = ArrTy->getNumElements();
5037 }
5038 bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy();
5040 // The AArch64 va_list type and handling is specified in the Procedure Call
5041 // Standard, section B.4:
5042 //
5043 // struct {
5044 // void *__stack;
5045 // void *__gr_top;
5046 // void *__vr_top;
5047 // int __gr_offs;
5048 // int __vr_offs;
5049 // };
5050 //
5051 llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
5052 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
5053 llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
5054 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
5056 auto TyInfo = getContext().getTypeInfoInChars(Ty);
5057 CharUnits TyAlign = TyInfo.second;
5059 Address reg_offs_p = Address::invalid();
5060 llvm::Value *reg_offs = nullptr;
5061 int reg_top_index;
5062 CharUnits reg_top_offset;
5063 int RegSize = IsIndirect ? 8 : TyInfo.first.getQuantity();
5064 if (!IsFPR) {
5065 // 3 is the field number of __gr_offs
5066 reg_offs_p =
5067 CGF.Builder.CreateStructGEP(VAListAddr, 3, CharUnits::fromQuantity(24),
5068 "gr_offs_p");
5069 reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
5070 reg_top_index = 1; // field number for __gr_top
5071 reg_top_offset = CharUnits::fromQuantity(8);
5072 RegSize = llvm::alignTo(RegSize, 8);
5073 } else {
5074 // 4 is the field number of __vr_offs.
5075 reg_offs_p =
5076 CGF.Builder.CreateStructGEP(VAListAddr, 4, CharUnits::fromQuantity(28),
5077 "vr_offs_p");
5078 reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
5079 reg_top_index = 2; // field number for __vr_top
5080 reg_top_offset = CharUnits::fromQuantity(16);
5081 RegSize = 16 * NumRegs;
5082 }
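// E.g. a plain int consumes one 8-byte GPR slot tracked via __gr_offs,
// while an HFA of two doubles needs RegSize = 16 * 2 = 32 bytes of the
// FPR save area tracked via __vr_offs.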
5084 //=======================================
5085 // Find out where argument was passed
5086 //=======================================
5088 // If reg_offs >= 0 we're already using the stack for this type of
5089 // argument. We don't want to keep updating reg_offs (in case it overflows,
5090 // though anyone passing 2GB of arguments, each at most 16 bytes, deserves
5091 // whatever they get).
5092 llvm::Value *UsingStack = nullptr;
5093 UsingStack = CGF.Builder.CreateICmpSGE(
5094 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));
5096 CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);
5098 // Otherwise, at least some kind of argument could go in these registers, the
5099 // question is whether this particular type is too big.
5100 CGF.EmitBlock(MaybeRegBlock);
5102 // Integer arguments may need to correct register alignment (for example a
5103 // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we
5104 // align __gr_offs to calculate the potential address.
5105 if (!IsFPR && !IsIndirect && TyAlign.getQuantity() > 8) {
5106 int Align = TyAlign.getQuantity();
5108 reg_offs = CGF.Builder.CreateAdd(
5109 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
5110 "align_regoffs");
5111 reg_offs = CGF.Builder.CreateAnd(
5112 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align),
5113 "aligned_regoffs");
5114 }
5116 // Update the gr_offs/vr_offs pointer for next call to va_arg on this va_list.
5117 // The fact that this is done unconditionally reflects the fact that
5118 // allocating an argument to the stack also uses up all the remaining
5119 // registers of the appropriate kind.
5120 llvm::Value *NewOffset = nullptr;
5121 NewOffset = CGF.Builder.CreateAdd(
5122 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
5123 CGF.Builder.CreateStore(NewOffset, reg_offs_p);
5125 // Now we're in a position to decide whether this argument really was in
5126 // registers or not.
5127 llvm::Value *InRegs = nullptr;
5128 InRegs = CGF.Builder.CreateICmpSLE(
5129 NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");
5131 CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);
5133 //=======================================
5134 // Argument was in registers
5135 //=======================================
5137 // Now we emit the code for the case where the argument was originally
5138 // passed in registers. First start the appropriate block:
5139 CGF.EmitBlock(InRegBlock);
5141 llvm::Value *reg_top = nullptr;
5142 Address reg_top_p = CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index,
5143 reg_top_offset, "reg_top_p");
5144 reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
5145 Address BaseAddr(CGF.Builder.CreateInBoundsGEP(reg_top, reg_offs),
5146 CharUnits::fromQuantity(IsFPR ? 16 : 8));
5147 Address RegAddr = Address::invalid();
5148 llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty);
5150 if (IsIndirect) {
5151 // If it's been passed indirectly (actually a struct), whatever we find
5152 // from stored registers or on the stack will actually be a struct **.
5153 MemTy = llvm::PointerType::getUnqual(MemTy);
5154 }
5156 const Type *Base = nullptr;
5157 uint64_t NumMembers = 0;
5158 bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers);
5159 if (IsHFA && NumMembers > 1) {
5160 // Homogeneous aggregates passed in registers will have their elements
5161 // split and stored 16 bytes apart regardless of size (they're notionally
5162 // in qN, qN+1, ...). We reload and store into a temporary local variable
5163 // contiguously.
5164 assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
5165 auto BaseTyInfo = getContext().getTypeInfoInChars(QualType(Base, 0));
5166 llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
5167 llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
5168 Address Tmp = CGF.CreateTempAlloca(HFATy,
5169 std::max(TyAlign, BaseTyInfo.second));
5171 // On big-endian platforms, the value will be right-aligned in its slot.
5172 int Offset = 0;
5173 if (CGF.CGM.getDataLayout().isBigEndian() &&
5174 BaseTyInfo.first.getQuantity() < 16)
5175 Offset = 16 - BaseTyInfo.first.getQuantity();
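// For example, a 4-byte float member of an HFA on a big-endian target sits
// in the last 4 bytes of its 16-byte register slot, so Offset is 12 here.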
5177 for (unsigned i = 0; i < NumMembers; ++i) {
5178 CharUnits BaseOffset = CharUnits::fromQuantity(16 * i + Offset);
5179 Address LoadAddr =
5180 CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, BaseOffset);
5181 LoadAddr = CGF.Builder.CreateElementBitCast(LoadAddr, BaseTy);
5183 Address StoreAddr =
5184 CGF.Builder.CreateConstArrayGEP(Tmp, i, BaseTyInfo.first);
5186 llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
5187 CGF.Builder.CreateStore(Elem, StoreAddr);
5188 }
5190 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, MemTy);
5191 } else {
5192 // Otherwise the object is contiguous in memory.
5194 // It might be right-aligned in its slot.
5195 CharUnits SlotSize = BaseAddr.getAlignment();
5196 if (CGF.CGM.getDataLayout().isBigEndian() && !IsIndirect &&
5197 (IsHFA || !isAggregateTypeForABI(Ty)) &&
5198 TyInfo.first < SlotSize) {
5199 CharUnits Offset = SlotSize - TyInfo.first;
5200 BaseAddr = CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, Offset);
5201 }
5203 RegAddr = CGF.Builder.CreateElementBitCast(BaseAddr, MemTy);
5204 }
5206 CGF.EmitBranch(ContBlock);
5208 //=======================================
5209 // Argument was on the stack
5210 //=======================================
5211 CGF.EmitBlock(OnStackBlock);
5213 Address stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0,
5214 CharUnits::Zero(), "stack_p");
5215 llvm::Value *OnStackPtr = CGF.Builder.CreateLoad(stack_p, "stack");
5217 // Again, stack arguments may need realignment. In this case both integer and
5218 // floating-point ones might be affected.
5219 if (!IsIndirect && TyAlign.getQuantity() > 8) {
5220 int Align = TyAlign.getQuantity();
5222 OnStackPtr = CGF.Builder.CreatePtrToInt(OnStackPtr, CGF.Int64Ty);
5224 OnStackPtr = CGF.Builder.CreateAdd(
5225 OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
5226 "align_stack");
5227 OnStackPtr = CGF.Builder.CreateAnd(
5228 OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),
5229 "align_stack");
5231 OnStackPtr = CGF.Builder.CreateIntToPtr(OnStackPtr, CGF.Int8PtrTy);
5232 }
5233 Address OnStackAddr(OnStackPtr,
5234 std::max(CharUnits::fromQuantity(8), TyAlign));
5236 // All stack slots are multiples of 8 bytes.
5237 CharUnits StackSlotSize = CharUnits::fromQuantity(8);
5238 CharUnits StackSize;
5239 if (IsIndirect)
5240 StackSize = StackSlotSize;
5241 else
5242 StackSize = TyInfo.first.alignTo(StackSlotSize);
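// For example, a 12-byte struct passed directly on the stack consumes
// alignTo(12, 8) == 16 bytes, i.e. two 8-byte stack slots.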
5244 llvm::Value *StackSizeC = CGF.Builder.getSize(StackSize);
5245 llvm::Value *NewStack =
5246 CGF.Builder.CreateInBoundsGEP(OnStackPtr, StackSizeC, "new_stack");
5248 // Write the new value of __stack for the next call to va_arg
5249 CGF.Builder.CreateStore(NewStack, stack_p);
5251 if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
5252 TyInfo.first < StackSlotSize) {
5253 CharUnits Offset = StackSlotSize - TyInfo.first;
5254 OnStackAddr = CGF.Builder.CreateConstInBoundsByteGEP(OnStackAddr, Offset);
5255 }
5257 OnStackAddr = CGF.Builder.CreateElementBitCast(OnStackAddr, MemTy);
5259 CGF.EmitBranch(ContBlock);
5261 //=======================================
5262 // Tidy up
5263 //=======================================
5264 CGF.EmitBlock(ContBlock);
5266 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock,
5267 OnStackAddr, OnStackBlock, "vaargs.addr");
5269 if (IsIndirect)
5270 return Address(CGF.Builder.CreateLoad(ResAddr, "vaarg.addr"),
5271 TyInfo.second);
5273 return ResAddr;
5274 }
5276 Address AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty,
5277 CodeGenFunction &CGF) const {
5278 // The backend's lowering doesn't support va_arg for aggregates or
5279 // illegal vector types. Lower VAArg here for these cases and use
5280 // the LLVM va_arg instruction for everything else.
5281 if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty))
5282 return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());
5284 CharUnits SlotSize = CharUnits::fromQuantity(8);
5286 // Empty records are ignored for parameter passing purposes.
5287 if (isEmptyRecord(getContext(), Ty, true)) {
5288 Address Addr(CGF.Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize);
5289 Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
5290 return Addr;
5291 }
5293 // The size of the actual thing passed, which might end up just
5294 // being a pointer for indirect types.
5295 auto TyInfo = getContext().getTypeInfoInChars(Ty);
5297 // Arguments bigger than 16 bytes which aren't homogeneous
5298 // aggregates should be passed indirectly.
5299 bool IsIndirect = false;
5300 if (TyInfo.first.getQuantity() > 16) {
5301 const Type *Base = nullptr;
5302 uint64_t Members = 0;
5303 IsIndirect = !isHomogeneousAggregate(Ty, Base, Members);
5304 }
5306 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
5307 TyInfo, SlotSize, /*AllowHigherAlign*/ true);
5310 //===----------------------------------------------------------------------===//
5311 // ARM ABI Implementation
5312 //===----------------------------------------------------------------------===//
5316 class ARMABIInfo : public SwiftABIInfo {
5317 public:
5318 enum ABIKind {
5319 APCS = 0,
5320 AAPCS = 1,
5321 AAPCS_VFP = 2,
5322 AAPCS16_VFP = 3,
5323 };
5325 private:
5326 ABIKind Kind;
5328 public:
5329 ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind)
5330 : SwiftABIInfo(CGT), Kind(_Kind) {
5331 setCCs();
5332 }
5334 bool isEABI() const {
5335 switch (getTarget().getTriple().getEnvironment()) {
5336 case llvm::Triple::Android:
5337 case llvm::Triple::EABI:
5338 case llvm::Triple::EABIHF:
5339 case llvm::Triple::GNUEABI:
5340 case llvm::Triple::GNUEABIHF:
5341 case llvm::Triple::MuslEABI:
5342 case llvm::Triple::MuslEABIHF:
5343 return true;
5344 default:
5345 return false;
5346 }
5347 }
5349 bool isEABIHF() const {
5350 switch (getTarget().getTriple().getEnvironment()) {
5351 case llvm::Triple::EABIHF:
5352 case llvm::Triple::GNUEABIHF:
5353 case llvm::Triple::MuslEABIHF:
5354 return true;
5355 default:
5356 return false;
5357 }
5358 }
5360 ABIKind getABIKind() const { return Kind; }
5363 ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic) const;
5364 ABIArgInfo classifyArgumentType(QualType Ty, bool isVariadic) const;
5365 bool isIllegalVectorType(QualType Ty) const;
5367 bool isHomogeneousAggregateBaseType(QualType Ty) const override;
5368 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
5369 uint64_t Members) const override;
5371 void computeInfo(CGFunctionInfo &FI) const override;
5373 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
5374 QualType Ty) const override;
5376 llvm::CallingConv::ID getLLVMDefaultCC() const;
5377 llvm::CallingConv::ID getABIDefaultCC() const;
5378 void setCCs();
5380 bool shouldPassIndirectlyForSwift(CharUnits totalSize,
5381 ArrayRef<llvm::Type*> scalars,
5382 bool asReturnValue) const override {
5383 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
5384 }
5385 bool isSwiftErrorInRegister() const override {
5386 return true;
5387 }
5388 };
5390 class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
5391 public:
5392 ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
5393 :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {}
5395 const ARMABIInfo &getABIInfo() const {
5396 return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo());
5397 }
5399 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
5400 return 13;
5401 }
5403 StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
5404 return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue";
5407 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
5408 llvm::Value *Address) const override {
5409 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
5411 // 0-15 are the 16 integer registers.
5412 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15);
5413 return false;
5414 }
5416 unsigned getSizeOfUnwindException() const override {
5417 if (getABIInfo().isEABI()) return 88;
5418 return TargetCodeGenInfo::getSizeOfUnwindException();
5419 }
5421 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5422 CodeGen::CodeGenModule &CGM) const override {
5423 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
5424 if (!FD)
5425 return;
5427 const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>();
5428 if (!Attr)
5429 return;
5431 const char *Kind;
5432 switch (Attr->getInterrupt()) {
5433 case ARMInterruptAttr::Generic: Kind = ""; break;
5434 case ARMInterruptAttr::IRQ: Kind = "IRQ"; break;
5435 case ARMInterruptAttr::FIQ: Kind = "FIQ"; break;
5436 case ARMInterruptAttr::SWI: Kind = "SWI"; break;
5437 case ARMInterruptAttr::ABORT: Kind = "ABORT"; break;
5438 case ARMInterruptAttr::UNDEF: Kind = "UNDEF"; break;
5439 }
5441 llvm::Function *Fn = cast<llvm::Function>(GV);
5443 Fn->addFnAttr("interrupt", Kind);
5445 ARMABIInfo::ABIKind ABI = cast<ARMABIInfo>(getABIInfo()).getABIKind();
5446 if (ABI == ARMABIInfo::APCS)
5447 return;
5449 // AAPCS guarantees that sp will be 8-byte aligned on any public interface,
5450 // however this is not necessarily true on taking any interrupt. Instruct
5451 // the backend to perform a realignment as part of the function prologue.
5452 llvm::AttrBuilder B;
5453 B.addStackAlignmentAttr(8);
5454 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
5455 }
5456 };
5458 class WindowsARMTargetCodeGenInfo : public ARMTargetCodeGenInfo {
5459 public:
5460 WindowsARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
5461 : ARMTargetCodeGenInfo(CGT, K) {}
5463 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5464 CodeGen::CodeGenModule &CGM) const override;
5466 void getDependentLibraryOption(llvm::StringRef Lib,
5467 llvm::SmallString<24> &Opt) const override {
5468 Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
5471 void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
5472 llvm::SmallString<32> &Opt) const override {
5473 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
5477 void WindowsARMTargetCodeGenInfo::setTargetAttributes(
5478 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
5479 ARMTargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
5480 addStackProbeSizeTargetAttribute(D, GV, CGM);
5481 }
5484 void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
5485 if (!getCXXABI().classifyReturnType(FI))
5486 FI.getReturnInfo() =
5487 classifyReturnType(FI.getReturnType(), FI.isVariadic());
5489 for (auto &I : FI.arguments())
5490 I.info = classifyArgumentType(I.type, FI.isVariadic());
5492 // Always honor user-specified calling convention.
5493 if (FI.getCallingConvention() != llvm::CallingConv::C)
5494 return;
5496 llvm::CallingConv::ID cc = getRuntimeCC();
5497 if (cc != llvm::CallingConv::C)
5498 FI.setEffectiveCallingConvention(cc);
5499 }
5501 /// Return the default calling convention that LLVM will use.
5502 llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const {
5503 // The default calling convention that LLVM will infer.
5504 if (isEABIHF() || getTarget().getTriple().isWatchABI())
5505 return llvm::CallingConv::ARM_AAPCS_VFP;
5506 else if (isEABI())
5507 return llvm::CallingConv::ARM_AAPCS;
5508 else
5509 return llvm::CallingConv::ARM_APCS;
5510 }
5512 /// Return the calling convention that our ABI would like us to use
5513 /// as the C calling convention.
5514 llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const {
5515 switch (getABIKind()) {
5516 case APCS: return llvm::CallingConv::ARM_APCS;
5517 case AAPCS: return llvm::CallingConv::ARM_AAPCS;
5518 case AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
5519 case AAPCS16_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
5520 }
5521 llvm_unreachable("bad ABI kind");
5522 }
5524 void ARMABIInfo::setCCs() {
5525 assert(getRuntimeCC() == llvm::CallingConv::C);
5527 // Don't muddy up the IR with a ton of explicit annotations if
5528 // they'd just match what LLVM will infer from the triple.
5529 llvm::CallingConv::ID abiCC = getABIDefaultCC();
5530 if (abiCC != getLLVMDefaultCC())
5531 RuntimeCC = abiCC;
5533 // AAPCS apparently requires runtime support functions to be soft-float, but
5534 // that's almost certainly for historic reasons (Thumb1 not supporting VFP
5535 // most likely). It's more convenient for AAPCS16_VFP to be hard-float.
5536 switch (getABIKind()) {
5537 case APCS:
5538 case AAPCS16_VFP:
5539 if (abiCC != getLLVMDefaultCC())
5540 BuiltinCC = abiCC;
5541 break;
5542 case AAPCS:
5543 case AAPCS_VFP:
5544 BuiltinCC = llvm::CallingConv::ARM_AAPCS;
5545 break;
5546 }
5547 }
5549 ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
5550 bool isVariadic) const {
5551 // 6.1.2.1 The following argument types are VFP CPRCs:
5552 // A single-precision floating-point type (including promoted
5553 // half-precision types); A double-precision floating-point type;
5554 // A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate
5555 // with a Base Type of a single- or double-precision floating-point type,
5556 // 64-bit containerized vectors or 128-bit containerized vectors with one
5557 // to four Elements.
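// For example, "struct S { float x, y, z; };" is a Homogeneous Aggregate
// with a float Base Type and three Elements, so it is a VFP CPRC, while
// "struct T { float f; int i; };" is not (its members have mixed types).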
5558 bool IsEffectivelyAAPCS_VFP = getABIKind() == AAPCS_VFP && !isVariadic;
5560 Ty = useFirstFieldIfTransparentUnion(Ty);
5562 // Handle illegal vector types here.
5563 if (isIllegalVectorType(Ty)) {
5564 uint64_t Size = getContext().getTypeSize(Ty);
5565 if (Size <= 32) {
5566 llvm::Type *ResType =
5567 llvm::Type::getInt32Ty(getVMContext());
5568 return ABIArgInfo::getDirect(ResType);
5569 }
5570 if (Size == 64) {
5571 llvm::Type *ResType = llvm::VectorType::get(
5572 llvm::Type::getInt32Ty(getVMContext()), 2);
5573 return ABIArgInfo::getDirect(ResType);
5574 }
5575 if (Size == 128) {
5576 llvm::Type *ResType = llvm::VectorType::get(
5577 llvm::Type::getInt32Ty(getVMContext()), 4);
5578 return ABIArgInfo::getDirect(ResType);
5579 }
5580 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
5581 }
5583 // __fp16 gets passed as if it were an int or float, but with the top 16 bits
5584 // unspecified. This is not done for OpenCL as it handles the half type
5585 // natively, and does not need to interwork with AAPCS code.
5586 if (Ty->isHalfType() && !getContext().getLangOpts().NativeHalfArgsAndReturns) {
5587 llvm::Type *ResType = IsEffectivelyAAPCS_VFP ?
5588 llvm::Type::getFloatTy(getVMContext()) :
5589 llvm::Type::getInt32Ty(getVMContext());
5590 return ABIArgInfo::getDirect(ResType);
5591 }
5593 if (!isAggregateTypeForABI(Ty)) {
5594 // Treat an enum type as its underlying type.
5595 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
5596 Ty = EnumTy->getDecl()->getIntegerType();
5597 }
5599 return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend()
5600 : ABIArgInfo::getDirect());
5601 }
5603 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
5604 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
5605 }
5607 // Ignore empty records.
5608 if (isEmptyRecord(getContext(), Ty, true))
5609 return ABIArgInfo::getIgnore();
5611 if (IsEffectivelyAAPCS_VFP) {
5612 // Homogeneous Aggregates need to be expanded when we can fit the aggregate
5613 // into VFP registers.
5614 const Type *Base = nullptr;
5615 uint64_t Members = 0;
5616 if (isHomogeneousAggregate(Ty, Base, Members)) {
5617 assert(Base && "Base class should be set for homogeneous aggregate");
5618 // Base can be a floating-point or a vector.
5619 return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
5620 }
5621 } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
5622 // WatchOS does have homogeneous aggregates. Note that we intentionally use
5623 // this convention even for a variadic function: the backend will reuse
5624 // GPRs if needed.
5625 const Type *Base = nullptr;
5626 uint64_t Members = 0;
5627 if (isHomogeneousAggregate(Ty, Base, Members)) {
5628 assert(Base && Members <= 4 && "unexpected homogeneous aggregate");
5629 llvm::Type *Ty =
5630 llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members);
5631 return ABIArgInfo::getDirect(Ty, 0, nullptr, false);
5632 }
5633 }
5635 if (getABIKind() == ARMABIInfo::AAPCS16_VFP &&
5636 getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(16)) {
5637 // WatchOS is adopting the 64-bit AAPCS rule on composite types: if they're
5638 // bigger than 128-bits, they get placed in space allocated by the caller,
5639 // and a pointer is passed.
5640 return ABIArgInfo::getIndirect(
5641 CharUnits::fromQuantity(getContext().getTypeAlign(Ty) / 8), false);
5642 }
5644 // Support byval for ARM.
5645 // The ABI alignment for APCS is 4-byte and for AAPCS at least 4-byte and at
5646 // most 8-byte. We realign the indirect argument if type alignment is bigger
5647 // than ABI alignment.
5648 uint64_t ABIAlign = 4;
5649 uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8;
5650 if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
5651 getABIKind() == ARMABIInfo::AAPCS)
5652 ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
5654 if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) {
5655 assert(getABIKind() != ARMABIInfo::AAPCS16_VFP && "unexpected byval");
5656 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
5657 /*ByVal=*/true,
5658 /*Realign=*/TyAlign > ABIAlign);
5659 }
5661 // On RenderScript, coerce aggregates <= 64 bytes to an integer array of
5662 // the same size and alignment.
5663 if (getTarget().isRenderScriptTarget()) {
5664 return coerceToIntArray(Ty, getContext(), getVMContext());
5665 }
5667 // Otherwise, pass by coercing to a structure of the appropriate size.
5668 llvm::Type* ElemTy;
5669 unsigned SizeRegs;
5670 // FIXME: Try to match the types of the arguments more accurately where
5671 // we can.
5672 if (getContext().getTypeAlign(Ty) <= 32) {
5673 ElemTy = llvm::Type::getInt32Ty(getVMContext());
5674 SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
5675 } else {
5676 ElemTy = llvm::Type::getInt64Ty(getVMContext());
5677 SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
5678 }
5680 return ABIArgInfo::getDirect(llvm::ArrayType::get(ElemTy, SizeRegs));
5683 static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
5684 llvm::LLVMContext &VMContext) {
5685 // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
5686 // is called integer-like if its size is less than or equal to one word, and
5687 // the offset of each of its addressable sub-fields is zero.
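// For example, "struct { short s; }" and "union { int i; char c; }" are
// integer-like on a 32-bit target, while "struct { float f; }" (floating
// point) and "struct { int a, b; }" (larger than a word) are not.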
5689 uint64_t Size = Context.getTypeSize(Ty);
5691 // Check that the type fits in a word.
5692 if (Size > 32)
5693 return false;
5695 // FIXME: Handle vector types!
5696 if (Ty->isVectorType())
5697 return false;
5699 // Float types are never treated as "integer like".
5700 if (Ty->isRealFloatingType())
5701 return false;
5703 // If this is a builtin or pointer type then it is ok.
5704 if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
5705 return true;
5707 // Small complex integer types are "integer like".
5708 if (const ComplexType *CT = Ty->getAs<ComplexType>())
5709 return isIntegerLikeType(CT->getElementType(), Context, VMContext);
5711 // Single element and zero sized arrays should be allowed, by the definition
5712 // above, but they are not.
5714 // Otherwise, it must be a record type.
5715 const RecordType *RT = Ty->getAs<RecordType>();
5716 if (!RT) return false;
5718 // Ignore records with flexible arrays.
5719 const RecordDecl *RD = RT->getDecl();
5720 if (RD->hasFlexibleArrayMember())
5721 return false;
5723 // Check that all sub-fields are at offset 0, and are themselves "integer
5724 // like".
5725 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
5727 bool HadField = false;
5728 unsigned idx = 0;
5729 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
5730 i != e; ++i, ++idx) {
5731 const FieldDecl *FD = *i;
5733 // Bit-fields are not addressable, we only need to verify they are "integer
5734 // like". We still have to disallow a subsequent non-bitfield, for example:
5735 // struct { int : 0; int x }
5736 // is non-integer like according to gcc.
5737 if (FD->isBitField()) {
5738 if (!RD->isUnion())
5739 HadField = true;
5741 if (!isIntegerLikeType(FD->getType(), Context, VMContext))
5742 return false;
5744 continue;
5745 }
5747 // Check if this field is at offset 0.
5748 if (Layout.getFieldOffset(idx) != 0)
5749 return false;
5751 if (!isIntegerLikeType(FD->getType(), Context, VMContext))
5752 return false;
5754 // Only allow at most one field in a structure. This doesn't match the
5755 // wording above, but follows gcc in situations with a field following an
5756 // empty structure.
5757 if (!RD->isUnion()) {
5758 if (HadField)
5759 return false;
5761 HadField = true;
5762 }
5763 }
5765 return true;
5766 }
5768 ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
5769 bool isVariadic) const {
5770 bool IsEffectivelyAAPCS_VFP =
5771 (getABIKind() == AAPCS_VFP || getABIKind() == AAPCS16_VFP) && !isVariadic;
5773 if (RetTy->isVoidType())
5774 return ABIArgInfo::getIgnore();
5776 // Large vector types should be returned via memory.
5777 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) {
5778 return getNaturalAlignIndirect(RetTy);
5779 }
5781 // __fp16 gets returned as if it were an int or float, but with the top 16
5782 // bits unspecified. This is not done for OpenCL as it handles the half type
5783 // natively, and does not need to interwork with AAPCS code.
5784 if (RetTy->isHalfType() && !getContext().getLangOpts().NativeHalfArgsAndReturns) {
5785 llvm::Type *ResType = IsEffectivelyAAPCS_VFP ?
5786 llvm::Type::getFloatTy(getVMContext()) :
5787 llvm::Type::getInt32Ty(getVMContext());
5788 return ABIArgInfo::getDirect(ResType);
5789 }
5791 if (!isAggregateTypeForABI(RetTy)) {
5792 // Treat an enum type as its underlying type.
5793 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
5794 RetTy = EnumTy->getDecl()->getIntegerType();
5796 return RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend()
5797 : ABIArgInfo::getDirect();
5798 }
5800 // Are we following APCS?
5801 if (getABIKind() == APCS) {
5802 if (isEmptyRecord(getContext(), RetTy, false))
5803 return ABIArgInfo::getIgnore();
5805 // Complex types are all returned as packed integers.
5807 // FIXME: Consider using 2 x vector types if the back end handles them
5808 // correctly.
5809 if (RetTy->isAnyComplexType())
5810 return ABIArgInfo::getDirect(llvm::IntegerType::get(
5811 getVMContext(), getContext().getTypeSize(RetTy)));
5813 // Integer like structures are returned in r0.
5814 if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
5815 // Return in the smallest viable integer type.
5816 uint64_t Size = getContext().getTypeSize(RetTy);
5817 if (Size <= 8)
5818 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
5819 if (Size <= 16)
5820 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
5821 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
5822 }
5824 // Otherwise return in memory.
5825 return getNaturalAlignIndirect(RetTy);
5826 }
5828 // Otherwise this is an AAPCS variant.
5830 if (isEmptyRecord(getContext(), RetTy, true))
5831 return ABIArgInfo::getIgnore();
5833 // Check for homogeneous aggregates with AAPCS-VFP.
5834 if (IsEffectivelyAAPCS_VFP) {
5835 const Type *Base = nullptr;
5836 uint64_t Members = 0;
5837 if (isHomogeneousAggregate(RetTy, Base, Members)) {
5838 assert(Base && "Base class should be set for homogeneous aggregate");
5839 // Homogeneous Aggregates are returned directly.
5840 return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
5841 }
5842 }
5844 // Aggregates <= 4 bytes are returned in r0; other aggregates
5845 // are returned indirectly.
5846 uint64_t Size = getContext().getTypeSize(RetTy);
5847 if (Size <= 32) {
5848 // On RenderScript, coerce aggregates <= 4 bytes to an integer array of
5849 // the same size and alignment.
5850 if (getTarget().isRenderScriptTarget()) {
5851 return coerceToIntArray(RetTy, getContext(), getVMContext());
5852 }
5853 if (getDataLayout().isBigEndian())
5854 // Return in a 32-bit integer type (as if loaded by LDR, AAPCS 5.4).
5855 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
5857 // Return in the smallest viable integer type.
5858 if (Size <= 8)
5859 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
5860 if (Size <= 16)
5861 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
5862 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
5863 } else if (Size <= 128 && getABIKind() == AAPCS16_VFP) {
5864 llvm::Type *Int32Ty = llvm::Type::getInt32Ty(getVMContext());
5865 llvm::Type *CoerceTy =
5866 llvm::ArrayType::get(Int32Ty, llvm::alignTo(Size, 32) / 32);
5867 return ABIArgInfo::getDirect(CoerceTy);
5868 }
5870 return getNaturalAlignIndirect(RetTy);
5871 }
5873 /// isIllegalVectorType - check whether Ty is an illegal vector type.
5874 bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
5875 if (const VectorType *VT = Ty->getAs<VectorType> ()) {
5876 if (getTarget().getTriple().isAndroid()) {
5877 // Android shipped using Clang 3.1, which supported a slightly different
5878 // vector ABI. The primary differences were that 3-element vector types
5879 // were legal, and so were sub-32-bit vectors (i.e. <2 x i8>). This path
5880 // accepts that legacy behavior for Android only.
5881 // Check whether VT is legal.
5882 unsigned NumElements = VT->getNumElements();
5883 // NumElements should be a power of 2 or equal to 3.
5884 if (!llvm::isPowerOf2_32(NumElements) && NumElements != 3)
5885 return true;
5886 } else {
5887 // Check whether VT is legal.
5888 unsigned NumElements = VT->getNumElements();
5889 uint64_t Size = getContext().getTypeSize(VT);
5890 // NumElements should be a power of 2.
5891 if (!llvm::isPowerOf2_32(NumElements))
5892 return true;
5893 // Size should be greater than 32 bits.
5894 return Size <= 32;
5895 }
5896 }
5897 return false;
5898 }
5900 bool ARMABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
5901 // Homogeneous aggregates for AAPCS-VFP must have base types of float,
5902 // double, or 64-bit or 128-bit vectors.
5903 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
5904 if (BT->getKind() == BuiltinType::Float ||
5905 BT->getKind() == BuiltinType::Double ||
5906 BT->getKind() == BuiltinType::LongDouble)
5907 return true;
5908 } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
5909 unsigned VecSize = getContext().getTypeSize(VT);
5910 if (VecSize == 64 || VecSize == 128)
5911 return true;
5912 }
5913 return false;
5914 }
5916 bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
5917 uint64_t Members) const {
5918 return Members <= 4;
5921 Address ARMABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
5922 QualType Ty) const {
5923 CharUnits SlotSize = CharUnits::fromQuantity(4);
5925 // Empty records are ignored for parameter passing purposes.
5926 if (isEmptyRecord(getContext(), Ty, true)) {
5927 Address Addr(CGF.Builder.CreateLoad(VAListAddr), SlotSize);
5928 Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
5929 return Addr;
5930 }
5932 auto TyInfo = getContext().getTypeInfoInChars(Ty);
5933 CharUnits TyAlignForABI = TyInfo.second;
5935 // Use indirect if the size of the illegal vector is bigger than 16 bytes.
5936 bool IsIndirect = false;
5937 const Type *Base = nullptr;
5938 uint64_t Members = 0;
5939 if (TyInfo.first > CharUnits::fromQuantity(16) && isIllegalVectorType(Ty)) {
5940 IsIndirect = true;
5942 // ARMv7k passes structs bigger than 16 bytes indirectly, in space
5943 // allocated by the caller.
5944 } else if (TyInfo.first > CharUnits::fromQuantity(16) &&
5945 getABIKind() == ARMABIInfo::AAPCS16_VFP &&
5946 !isHomogeneousAggregate(Ty, Base, Members)) {
5947 IsIndirect = true;
5949 // Otherwise, bound the type's ABI alignment.
5950 // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for
5951 // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte.
5952 // Our callers should be prepared to handle an under-aligned address.
5953 } else if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
5954 getABIKind() == ARMABIInfo::AAPCS) {
5955 TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
5956 TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(8));
5957 } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
5958 // ARMv7k allows type alignment up to 16 bytes.
5959 TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
5960 TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(16));
5961 } else {
5962 TyAlignForABI = CharUnits::fromQuantity(4);
5963 }
5964 TyInfo.second = TyAlignForABI;
5966 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo,
5967 SlotSize, /*AllowHigherAlign*/ true);
5970 //===----------------------------------------------------------------------===//
5971 // NVPTX ABI Implementation
5972 //===----------------------------------------------------------------------===//
5976 class NVPTXABIInfo : public ABIInfo {
5977 public:
5978 NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
5980 ABIArgInfo classifyReturnType(QualType RetTy) const;
5981 ABIArgInfo classifyArgumentType(QualType Ty) const;
5983 void computeInfo(CGFunctionInfo &FI) const override;
5984 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
5985 QualType Ty) const override;
5986 };
5988 class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
5989 public:
5990 NVPTXTargetCodeGenInfo(CodeGenTypes &CGT)
5991 : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {}
5993 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5994 CodeGen::CodeGenModule &M) const override;
5996 // Adds an MDNode of the form {F, Name, Operand} to the
5997 // nvvm.annotations named metadata node.
5998 static void addNVVMMetadata(llvm::Function *F, StringRef Name, int Operand);
5999 };
6001 ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
6002 if (RetTy->isVoidType())
6003 return ABIArgInfo::getIgnore();
6005 // Note: this is different from the default ABI.
6006 if (!RetTy->isScalarType())
6007 return ABIArgInfo::getDirect();
6009 // Treat an enum type as its underlying type.
6010 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
6011 RetTy = EnumTy->getDecl()->getIntegerType();
6013 return (RetTy->isPromotableIntegerType() ?
6014 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
6017 ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
6018 // Treat an enum type as its underlying type.
6019 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
6020 Ty = EnumTy->getDecl()->getIntegerType();
6022 // Return aggregate types as indirect by value.
6023 if (isAggregateTypeForABI(Ty))
6024 return getNaturalAlignIndirect(Ty, /* byval */ true);
6026 return (Ty->isPromotableIntegerType() ?
6027 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
6030 void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
6031 if (!getCXXABI().classifyReturnType(FI))
6032 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
6033 for (auto &I : FI.arguments())
6034 I.info = classifyArgumentType(I.type);
6036 // Always honor user-specified calling convention.
6037 if (FI.getCallingConvention() != llvm::CallingConv::C)
6038 return;
6040 FI.setEffectiveCallingConvention(getRuntimeCC());
6041 }
6043 Address NVPTXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6044 QualType Ty) const {
6045 llvm_unreachable("NVPTX does not support varargs");
6048 void NVPTXTargetCodeGenInfo::
6049 setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
6050 CodeGen::CodeGenModule &M) const{
6051 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
6052 if (!FD)
6053 return;
6054 llvm::Function *F = cast<llvm::Function>(GV);
6056 // Perform special handling in OpenCL mode
6057 if (M.getLangOpts().OpenCL) {
6058 // Use OpenCL function attributes to check for kernel functions
6059 // By default, all functions are device functions
6060 if (FD->hasAttr<OpenCLKernelAttr>()) {
6061 // OpenCL __kernel functions get kernel metadata
6062 // Create !{<func-ref>, metadata !"kernel", i32 1} node
6063 addNVVMMetadata(F, "kernel", 1);
6064 // And kernel functions are not subject to inlining
6065 F->addFnAttr(llvm::Attribute::NoInline);
6066 }
6067 }
6069 // Perform special handling in CUDA mode.
6070 if (M.getLangOpts().CUDA) {
6071 // CUDA __global__ functions get a kernel metadata entry. Since
6072 // __global__ functions cannot be called from the device, we do not
6073 // need to set the noinline attribute.
6074 if (FD->hasAttr<CUDAGlobalAttr>()) {
6075 // Create !{<func-ref>, metadata !"kernel", i32 1} node
6076 addNVVMMetadata(F, "kernel", 1);
6078 if (CUDALaunchBoundsAttr *Attr = FD->getAttr<CUDALaunchBoundsAttr>()) {
6079 // Create !{<func-ref>, metadata !"maxntidx", i32 <val>} node
6080 llvm::APSInt MaxThreads(32);
6081 MaxThreads = Attr->getMaxThreads()->EvaluateKnownConstInt(M.getContext());
6083 addNVVMMetadata(F, "maxntidx", MaxThreads.getExtValue());
6085 // min blocks is an optional argument for CUDALaunchBoundsAttr. If it was
6086 // not specified in __launch_bounds__ or if the user specified a 0 value,
6087 // we don't have to add a PTX directive.
6088 if (Attr->getMinBlocks()) {
6089 llvm::APSInt MinBlocks(32);
6090 MinBlocks = Attr->getMinBlocks()->EvaluateKnownConstInt(M.getContext());
6091 if (MinBlocks > 0)
6092 // Create !{<func-ref>, metadata !"minctasm", i32 <val>} node
6093 addNVVMMetadata(F, "minctasm", MinBlocks.getExtValue());
6094 }
6095 }
6096 }
6097 }
6099 void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::Function *F, StringRef Name,
6100 int Operand) {
6101 llvm::Module *M = F->getParent();
6102 llvm::LLVMContext &Ctx = M->getContext();
6104 // Get "nvvm.annotations" metadata node
6105 llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations");
6107 llvm::Metadata *MDVals[] = {
6108 llvm::ConstantAsMetadata::get(F), llvm::MDString::get(Ctx, Name),
6109 llvm::ConstantAsMetadata::get(
6110 llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))};
6111 // Append metadata to nvvm.annotations
6112 MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
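// For example (a sketch; the exact textual IR varies by LLVM version, and
// @my_kernel is a placeholder name), addNVVMMetadata(F, "kernel", 1) yields:
//
//   !nvvm.annotations = !{!0}
//   !0 = !{void ()* @my_kernel, !"kernel", i32 1}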
6116 //===----------------------------------------------------------------------===//
6117 // SystemZ ABI Implementation
6118 //===----------------------------------------------------------------------===//
6122 class SystemZABIInfo : public SwiftABIInfo {
6123 bool HasVector;
6125 public:
6126 SystemZABIInfo(CodeGenTypes &CGT, bool HV)
6127 : SwiftABIInfo(CGT), HasVector(HV) {}
6129 bool isPromotableIntegerType(QualType Ty) const;
6130 bool isCompoundType(QualType Ty) const;
6131 bool isVectorArgumentType(QualType Ty) const;
6132 bool isFPArgumentType(QualType Ty) const;
6133 QualType GetSingleElementType(QualType Ty) const;
6135 ABIArgInfo classifyReturnType(QualType RetTy) const;
6136 ABIArgInfo classifyArgumentType(QualType ArgTy) const;
6138 void computeInfo(CGFunctionInfo &FI) const override {
6139 if (!getCXXABI().classifyReturnType(FI))
6140 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
6141 for (auto &I : FI.arguments())
6142 I.info = classifyArgumentType(I.type);
6143 }
6145 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6146 QualType Ty) const override;
6148 bool shouldPassIndirectlyForSwift(CharUnits totalSize,
6149 ArrayRef<llvm::Type*> scalars,
6150 bool asReturnValue) const override {
6151 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
6152 }
6153 bool isSwiftErrorInRegister() const override {
6154 return false;
6155 }
6156 };
6158 class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
6159 public:
6160 SystemZTargetCodeGenInfo(CodeGenTypes &CGT, bool HasVector)
6161 : TargetCodeGenInfo(new SystemZABIInfo(CGT, HasVector)) {}
6162 };
6166 bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const {
6167 // Treat an enum type as its underlying type.
6168 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
6169 Ty = EnumTy->getDecl()->getIntegerType();
6171 // Promotable integer types are required to be promoted by the ABI.
6172 if (Ty->isPromotableIntegerType())
6175 // 32-bit values must also be promoted.
6176 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
6177 switch (BT->getKind()) {
6178 case BuiltinType::Int:
6179 case BuiltinType::UInt:
6180 return true;
6181 default:
6182 return false;
6183 }
6185 return false;
6186 }
6187 bool SystemZABIInfo::isCompoundType(QualType Ty) const {
6188 return (Ty->isAnyComplexType() ||
6189 Ty->isVectorType() ||
6190 isAggregateTypeForABI(Ty));
6193 bool SystemZABIInfo::isVectorArgumentType(QualType Ty) const {
6194 return (HasVector &&
6195 Ty->isVectorType() &&
6196 getContext().getTypeSize(Ty) <= 128);
6199 bool SystemZABIInfo::isFPArgumentType(QualType Ty) const {
6200 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
6201 switch (BT->getKind()) {
6202 case BuiltinType::Float:
6203 case BuiltinType::Double:
6204 return true;
6205 default:
6206 return false;
6207 }
6209 return false;
6210 }
6212 QualType SystemZABIInfo::GetSingleElementType(QualType Ty) const {
6213 if (const RecordType *RT = Ty->getAsStructureType()) {
6214 const RecordDecl *RD = RT->getDecl();
6215 QualType Found;
6217 // If this is a C++ record, check the bases first.
6218 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
6219 for (const auto &I : CXXRD->bases()) {
6220 QualType Base = I.getType();
6222 // Empty bases don't affect things either way.
6223 if (isEmptyRecord(getContext(), Base, true))
6224 continue;
6226 if (!Found.isNull())
6227 return Ty;
6228 Found = GetSingleElementType(Base);
6229 }
6231 // Check the fields.
6232 for (const auto *FD : RD->fields()) {
6233 // For compatibility with GCC, ignore empty bitfields in C++ mode.
6234 // Unlike isSingleElementStruct(), empty structure and array fields
6235 // do count. So do anonymous bitfields that aren't zero-sized.
6236 if (getContext().getLangOpts().CPlusPlus &&
6237 FD->isBitField() && FD->getBitWidthValue(getContext()) == 0)
6238 continue;
6240 // Unlike isSingleElementStruct(), arrays do not count.
6241 // Nested structures still do though.
6242 if (!Found.isNull())
6243 return Ty;
6244 Found = GetSingleElementType(FD->getType());
6245 }
6247 // Unlike isSingleElementStruct(), trailing padding is allowed.
6248 // An 8-byte aligned struct s { float f; } is passed as a double.
6249 if (!Found.isNull())
6250 return Found;
6251 }
6253 return Ty;
6254 }
6256 Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6257 QualType Ty) const {
6258 // Assume that va_list type is correct; should be pointer to LLVM type:
6259 // struct {
6260 //   i64 __gpr;
6261 //   i64 __fpr;
6262 //   i8 *__overflow_arg_area;
6263 //   i8 *__reg_save_area;
6264 // };
6266 // Every non-vector argument occupies 8 bytes and is passed by preference
6267 // in either GPRs or FPRs. Vector arguments occupy 8 or 16 bytes and are
6268 // always passed on the stack.
6269 Ty = getContext().getCanonicalType(Ty);
6270 auto TyInfo = getContext().getTypeInfoInChars(Ty);
6271 llvm::Type *ArgTy = CGF.ConvertTypeForMem(Ty);
6272 llvm::Type *DirectTy = ArgTy;
6273 ABIArgInfo AI = classifyArgumentType(Ty);
6274 bool IsIndirect = AI.isIndirect();
6275 bool InFPRs = false;
6276 bool IsVector = false;
6277 CharUnits UnpaddedSize;
6278 CharUnits DirectAlign;
6279 if (IsIndirect) {
6280 DirectTy = llvm::PointerType::getUnqual(DirectTy);
6281 UnpaddedSize = DirectAlign = CharUnits::fromQuantity(8);
6282 } else {
6283 if (AI.getCoerceToType())
6284 ArgTy = AI.getCoerceToType();
6285 InFPRs = ArgTy->isFloatTy() || ArgTy->isDoubleTy();
6286 IsVector = ArgTy->isVectorTy();
6287 UnpaddedSize = TyInfo.first;
6288 DirectAlign = TyInfo.second;
6289 }
6290 CharUnits PaddedSize = CharUnits::fromQuantity(8);
6291 if (IsVector && UnpaddedSize > PaddedSize)
6292 PaddedSize = CharUnits::fromQuantity(16);
6293 assert((UnpaddedSize <= PaddedSize) && "Invalid argument size.");
6295 CharUnits Padding = (PaddedSize - UnpaddedSize);
6297 llvm::Type *IndexTy = CGF.Int64Ty;
6298 llvm::Value *PaddedSizeV =
6299 llvm::ConstantInt::get(IndexTy, PaddedSize.getQuantity());
6301 if (IsVector) {
6302 // Work out the address of a vector argument on the stack.
6303 // Vector arguments are always passed in the high bits of a
6304 // single (8 byte) or double (16 byte) stack slot.
6305 Address OverflowArgAreaPtr =
6306 CGF.Builder.CreateStructGEP(VAListAddr, 2, CharUnits::fromQuantity(16),
6307 "overflow_arg_area_ptr");
6308 Address OverflowArgArea =
6309 Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
6310 TyInfo.second);
6311 Address MemAddr =
6312 CGF.Builder.CreateElementBitCast(OverflowArgArea, DirectTy, "mem_addr");
6314 // Update overflow_arg_area_ptr pointer
6315 llvm::Value *NewOverflowArgArea =
6316 CGF.Builder.CreateGEP(OverflowArgArea.getPointer(), PaddedSizeV,
6317 "overflow_arg_area");
6318 CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
6320 return MemAddr;
6321 }
6323 assert(PaddedSize.getQuantity() == 8);
6325 unsigned MaxRegs, RegCountField, RegSaveIndex;
6326 CharUnits RegPadding;
6327 if (InFPRs) {
6328 MaxRegs = 4; // Maximum of 4 FPR arguments
6329 RegCountField = 1; // __fpr
6330 RegSaveIndex = 16; // save offset for f0
6331 RegPadding = CharUnits(); // floats are passed in the high bits of an FPR
6332 } else {
6333 MaxRegs = 5; // Maximum of 5 GPR arguments
6334 RegCountField = 0; // __gpr
6335 RegSaveIndex = 2; // save offset for r2
6336 RegPadding = Padding; // values are passed in the low bits of a GPR
6337 }
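// A worked example, assuming an i32 argument with __gpr == 0: the scaled
// register count is 0 and the base offset is 2 * 8 + 4 = 20, so the load
// address is __reg_save_area + 20, i.e. the low four bytes of r2's 8-byte
// slot (SystemZ is big-endian).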
6339 Address RegCountPtr = CGF.Builder.CreateStructGEP(
6340 VAListAddr, RegCountField, RegCountField * CharUnits::fromQuantity(8),
6341 "reg_count_ptr");
6342 llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count");
6343 llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
6344 llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV,
6345 "fits_in_regs");
6347 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
6348 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
6349 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
6350 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
6352 // Emit code to load the value if it was passed in registers.
6353 CGF.EmitBlock(InRegBlock);
6355 // Work out the address of an argument register.
6356 llvm::Value *ScaledRegCount =
6357 CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count");
6358 llvm::Value *RegBase =
6359 llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize.getQuantity()
6360 + RegPadding.getQuantity());
6361 llvm::Value *RegOffset =
6362 CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset");
6363 Address RegSaveAreaPtr =
6364 CGF.Builder.CreateStructGEP(VAListAddr, 3, CharUnits::fromQuantity(24),
6365 "reg_save_area_ptr");
6366 llvm::Value *RegSaveArea =
6367 CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area");
6368 Address RawRegAddr(CGF.Builder.CreateGEP(RegSaveArea, RegOffset,
6369 "raw_reg_addr"),
6370 PaddedSize);
6371 Address RegAddr =
6372 CGF.Builder.CreateElementBitCast(RawRegAddr, DirectTy, "reg_addr");
6374 // Update the register count
6375 llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1);
6376 llvm::Value *NewRegCount =
6377 CGF.Builder.CreateAdd(RegCount, One, "reg_count");
6378 CGF.Builder.CreateStore(NewRegCount, RegCountPtr);
6379 CGF.EmitBranch(ContBlock);
6381 // Emit code to load the value if it was passed in memory.
6382 CGF.EmitBlock(InMemBlock);
6384 // Work out the address of a stack argument.
6385 Address OverflowArgAreaPtr = CGF.Builder.CreateStructGEP(
6386 VAListAddr, 2, CharUnits::fromQuantity(16), "overflow_arg_area_ptr");
6387 Address OverflowArgArea =
6388 Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
6389 TyInfo.second);
6390 Address RawMemAddr =
6391 CGF.Builder.CreateConstByteGEP(OverflowArgArea, Padding, "raw_mem_addr");
6392 Address MemAddr =
6393 CGF.Builder.CreateElementBitCast(RawMemAddr, DirectTy, "mem_addr");
6395 // Update overflow_arg_area_ptr pointer
6396 llvm::Value *NewOverflowArgArea =
6397 CGF.Builder.CreateGEP(OverflowArgArea.getPointer(), PaddedSizeV,
6398 "overflow_arg_area");
6399 CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
6400 CGF.EmitBranch(ContBlock);
6402 // Return the appropriate result.
6403 CGF.EmitBlock(ContBlock);
6404 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock,
6405 MemAddr, InMemBlock, "va_arg.addr");
6407 if (IsIndirect)
6408 ResAddr = Address(CGF.Builder.CreateLoad(ResAddr, "indirect_arg"),
6409 TyInfo.second);
6411 return ResAddr;
6412 }
6414 ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
6415 if (RetTy->isVoidType())
6416 return ABIArgInfo::getIgnore();
6417 if (isVectorArgumentType(RetTy))
6418 return ABIArgInfo::getDirect();
6419 if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64)
6420 return getNaturalAlignIndirect(RetTy);
6421 return (isPromotableIntegerType(RetTy) ?
6422 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
6425 ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
6426 // Handle the generic C++ ABI.
6427 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
6428 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
6430 // Integers and enums are extended to full register width.
6431 if (isPromotableIntegerType(Ty))
6432 return ABIArgInfo::getExtend();
6434 // Handle vector types and vector-like structure types. Note that
6435 // as opposed to float-like structure types, we do not allow any
6436 // padding for vector-like structures, so verify the sizes match.
6437 uint64_t Size = getContext().getTypeSize(Ty);
6438 QualType SingleElementTy = GetSingleElementType(Ty);
6439 if (isVectorArgumentType(SingleElementTy) &&
6440 getContext().getTypeSize(SingleElementTy) == Size)
6441 return ABIArgInfo::getDirect(CGT.ConvertType(SingleElementTy));
6443 // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly.
6444 if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
6445 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
6447 // Handle small structures.
6448 if (const RecordType *RT = Ty->getAs<RecordType>()) {
6449 // Structures with flexible arrays have variable length, so they really
6450 // fail the size test above.
6451 const RecordDecl *RD = RT->getDecl();
6452 if (RD->hasFlexibleArrayMember())
6453 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
6455 // The structure is passed as an unextended integer, a float, or a double.
6456 llvm::Type *PassTy;
6457 if (isFPArgumentType(SingleElementTy)) {
6458 assert(Size == 32 || Size == 64);
6459 if (Size == 32)
6460 PassTy = llvm::Type::getFloatTy(getVMContext());
6461 else
6462 PassTy = llvm::Type::getDoubleTy(getVMContext());
6463 } else
6464 PassTy = llvm::IntegerType::get(getVMContext(), Size);
6465 return ABIArgInfo::getDirect(PassTy);
6466 }
6468 // Non-structure compounds are passed indirectly.
6469 if (isCompoundType(Ty))
6470 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
6472 return ABIArgInfo::getDirect(nullptr);
6475 //===----------------------------------------------------------------------===//
6476 // MSP430 ABI Implementation
6477 //===----------------------------------------------------------------------===//
6481 class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
6483 MSP430TargetCodeGenInfo(CodeGenTypes &CGT)
6484 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
6485 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
6486 CodeGen::CodeGenModule &M) const override;
6491 void MSP430TargetCodeGenInfo::setTargetAttributes(const Decl *D,
6492 llvm::GlobalValue *GV,
6493 CodeGen::CodeGenModule &M) const {
6494 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
6495 if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) {
6496 // Handle 'interrupt' attribute:
6497 llvm::Function *F = cast<llvm::Function>(GV);
6499 // Step 1: Set ISR calling convention.
6500 F->setCallingConv(llvm::CallingConv::MSP430_INTR);
6502 // Step 2: Add attributes goodness.
6503 F->addFnAttr(llvm::Attribute::NoInline);
6505 // Step 3: Emit ISR vector alias.
6506 unsigned Num = attr->getNumber() / 2;
6507 llvm::GlobalAlias::create(llvm::Function::ExternalLinkage,
6508 "__isr_" + Twine(Num), F);
6513 //===----------------------------------------------------------------------===//
6514 // MIPS ABI Implementation. This works for both little-endian and
6515 // big-endian variants.
6516 //===----------------------------------------------------------------------===//
6519 class MipsABIInfo : public ABIInfo {
6520 bool IsO32;
6521 unsigned MinABIStackAlignInBytes, StackAlignInBytes;
6522 void CoerceToIntArgs(uint64_t TySize,
6523 SmallVectorImpl<llvm::Type *> &ArgList) const;
6524 llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const;
6525 llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const;
6526 llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const;
6527 public:
6528 MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) :
6529 ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
6530 StackAlignInBytes(IsO32 ? 8 : 16) {}
6532 ABIArgInfo classifyReturnType(QualType RetTy) const;
6533 ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const;
6534 void computeInfo(CGFunctionInfo &FI) const override;
6535 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6536 QualType Ty) const override;
6537 bool shouldSignExtUnsignedType(QualType Ty) const override;
6540 class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
6541 unsigned SizeOfUnwindException;
6542 public:
6543 MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32)
6544 : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)),
6545 SizeOfUnwindException(IsO32 ? 24 : 32) {}
6547 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
6548 return 29;
6549 }
6551 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
6552 CodeGen::CodeGenModule &CGM) const override {
6553 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
6554 if (!FD) return;
6555 llvm::Function *Fn = cast<llvm::Function>(GV);
6556 if (FD->hasAttr<Mips16Attr>()) {
6557 Fn->addFnAttr("mips16");
6559 else if (FD->hasAttr<NoMips16Attr>()) {
6560 Fn->addFnAttr("nomips16");
6563 const MipsInterruptAttr *Attr = FD->getAttr<MipsInterruptAttr>();
6564 if (!Attr)
6565 return;
6567 const char *Kind;
6568 switch (Attr->getInterrupt()) {
6569 case MipsInterruptAttr::eic: Kind = "eic"; break;
6570 case MipsInterruptAttr::sw0: Kind = "sw0"; break;
6571 case MipsInterruptAttr::sw1: Kind = "sw1"; break;
6572 case MipsInterruptAttr::hw0: Kind = "hw0"; break;
6573 case MipsInterruptAttr::hw1: Kind = "hw1"; break;
6574 case MipsInterruptAttr::hw2: Kind = "hw2"; break;
6575 case MipsInterruptAttr::hw3: Kind = "hw3"; break;
6576 case MipsInterruptAttr::hw4: Kind = "hw4"; break;
6577 case MipsInterruptAttr::hw5: Kind = "hw5"; break;
6580 Fn->addFnAttr("interrupt", Kind);
6584 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
6585 llvm::Value *Address) const override;
6587 unsigned getSizeOfUnwindException() const override {
6588 return SizeOfUnwindException;
6593 void MipsABIInfo::CoerceToIntArgs(
6594 uint64_t TySize, SmallVectorImpl<llvm::Type *> &ArgList) const {
6595 llvm::IntegerType *IntTy =
6596 llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);
6598 // Add (TySize / MinABIStackAlignInBytes) args of IntTy.
6599 for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
6600 ArgList.push_back(IntTy);
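// For example, on N32/N64 (MinABIStackAlignInBytes == 8) a 96-bit aggregate
// adds a single i64 in the loop above; the leftover 32 bits become an i32
// below. On O32 the same aggregate becomes three i32s.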
6602 // If necessary, add one more integer type to ArgList.
6603 unsigned R = TySize % (MinABIStackAlignInBytes * 8);
6605 if (R)
6606 ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
6607 }
6609 // In N32/64, an aligned double-precision floating-point field is passed
6610 // in a register.
6611 llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const {
6612 SmallVector<llvm::Type*, 8> ArgList, IntArgList;
6614 if (IsO32) {
6615 CoerceToIntArgs(TySize, ArgList);
6616 return llvm::StructType::get(getVMContext(), ArgList);
6617 }
6619 if (Ty->isComplexType())
6620 return CGT.ConvertType(Ty);
6622 const RecordType *RT = Ty->getAs<RecordType>();
6624 // Unions/vectors are passed in integer registers.
6625 if (!RT || !RT->isStructureOrClassType()) {
6626 CoerceToIntArgs(TySize, ArgList);
6627 return llvm::StructType::get(getVMContext(), ArgList);
6630 const RecordDecl *RD = RT->getDecl();
6631 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
6632 assert(!(TySize % 8) && "Size of structure must be multiple of 8.");
6634 uint64_t LastOffset = 0;
6635 unsigned idx = 0;
6636 llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64);
6638 // Iterate over fields in the struct/class and check if there are any
6639 // aligned double fields.
6640 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
6641 i != e; ++i, ++idx) {
6642 const QualType Ty = i->getType();
6643 const BuiltinType *BT = Ty->getAs<BuiltinType>();
6645 if (!BT || BT->getKind() != BuiltinType::Double)
6646 continue;
6648 uint64_t Offset = Layout.getFieldOffset(idx);
6649 if (Offset % 64) // Ignore doubles that are not aligned.
6650 continue;
6652 // Add ((Offset - LastOffset) / 64) args of type i64.
6653 for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
6654 ArgList.push_back(I64);
6657 ArgList.push_back(llvm::Type::getDoubleTy(getVMContext()));
6658 LastOffset = Offset + 64;
6659 }
6661 CoerceToIntArgs(TySize - LastOffset, IntArgList);
6662 ArgList.append(IntArgList.begin(), IntArgList.end());
6664 return llvm::StructType::get(getVMContext(), ArgList);
6667 llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset,
6668 uint64_t Offset) const {
6669 if (OrigOffset + MinABIStackAlignInBytes > Offset)
6670 return nullptr;
6672 return llvm::IntegerType::get(getVMContext(), (Offset - OrigOffset) * 8);
6673 }
6675 ABIArgInfo
6676 MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
6677 Ty = useFirstFieldIfTransparentUnion(Ty);
6679 uint64_t OrigOffset = Offset;
6680 uint64_t TySize = getContext().getTypeSize(Ty);
6681 uint64_t Align = getContext().getTypeAlign(Ty) / 8;
6683 Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes),
6684 (uint64_t)StackAlignInBytes);
6685 unsigned CurrOffset = llvm::alignTo(Offset, Align);
6686 Offset = CurrOffset + llvm::alignTo(TySize, Align * 8) / 8;
6688 if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) {
6689 // Ignore empty aggregates.
6690 if (TySize == 0)
6691 return ABIArgInfo::getIgnore();
6693 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
6694 Offset = OrigOffset + MinABIStackAlignInBytes;
6695 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
6698 // If we have reached here, aggregates are passed directly by coercing to
6699 // another structure type. Padding is inserted if the offset of the
6700 // aggregate is unaligned.
6701 ABIArgInfo ArgInfo =
6702 ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0,
6703 getPaddingType(OrigOffset, CurrOffset));
6704 ArgInfo.setInReg(true);
6705 return ArgInfo;
6706 }
6708 // Treat an enum type as its underlying type.
6709 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
6710 Ty = EnumTy->getDecl()->getIntegerType();
6712 // All integral types are promoted to the GPR width.
6713 if (Ty->isIntegralOrEnumerationType())
6714 return ABIArgInfo::getExtend();
6716 return ABIArgInfo::getDirect(
6717 nullptr, 0, IsO32 ? nullptr : getPaddingType(OrigOffset, CurrOffset));
6718 }
6720 llvm::Type*
6721 MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
6722 const RecordType *RT = RetTy->getAs<RecordType>();
6723 SmallVector<llvm::Type*, 8> RTList;
6725 if (RT && RT->isStructureOrClassType()) {
6726 const RecordDecl *RD = RT->getDecl();
6727 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
6728 unsigned FieldCnt = Layout.getFieldCount();
6730 // N32/64 returns struct/classes in floating point registers if the
6731 // following conditions are met:
6732 // 1. The size of the struct/class is no larger than 128-bit.
6733 // 2. The struct/class has one or two fields all of which are floating
6734 //    point types.
6735 // 3. The offset of the first field is zero (this follows what gcc does).
6737 // Any other composite results are returned in integer registers.
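// For example, "struct { double d; }" and "struct { float f; double d; }"
// are returned in FPRs on N32/N64, while "struct { double d; int i; }" is
// returned in integer registers because its fields are not all floating
// point.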
6739 if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) {
6740 RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end();
6741 for (; b != e; ++b) {
6742 const BuiltinType *BT = b->getType()->getAs<BuiltinType>();
6744 if (!BT || !BT->isFloatingPoint())
6745 break;
6747 RTList.push_back(CGT.ConvertType(b->getType()));
6748 }
6750 if (b == e)
6751 return llvm::StructType::get(getVMContext(), RTList,
6752 RD->hasAttr<PackedAttr>());
6754 RTList.clear();
6755 }
6756 }
6758 CoerceToIntArgs(Size, RTList);
6759 return llvm::StructType::get(getVMContext(), RTList);
6762 ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
6763 uint64_t Size = getContext().getTypeSize(RetTy);
6765 if (RetTy->isVoidType())
6766 return ABIArgInfo::getIgnore();
6768 // O32 doesn't treat zero-sized structs differently from other structs.
6769 // However, N32/N64 ignores zero sized return values.
6770 if (!IsO32 && Size == 0)
6771 return ABIArgInfo::getIgnore();
6773 if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) {
6774 if (Size <= 128) {
6775 if (RetTy->isAnyComplexType())
6776 return ABIArgInfo::getDirect();
6778 // O32 returns integer vectors in registers and N32/N64 returns all small
6779 // aggregates in registers.
6780 if (!IsO32 ||
6781 (RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())) {
6782 ABIArgInfo ArgInfo =
6783 ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
6784 ArgInfo.setInReg(true);
6785 return ArgInfo;
6786 }
6787 }
6789 return getNaturalAlignIndirect(RetTy);
6790 }
6792 // Treat an enum type as its underlying type.
6793 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
6794 RetTy = EnumTy->getDecl()->getIntegerType();
6796 return (RetTy->isPromotableIntegerType() ?
6797 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
  ABIArgInfo &RetInfo = FI.getReturnInfo();
  if (!getCXXABI().classifyReturnType(FI))
    RetInfo = classifyReturnType(FI.getReturnType());

  // Check if a pointer to an aggregate is passed as a hidden argument.
  uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0;

  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type, Offset);
}
Address MipsABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                               QualType OrigTy) const {
  QualType Ty = OrigTy;

  // Integer arguments are promoted to 32-bit on O32 and 64-bit on N32/N64.
  // Pointers are also promoted in the same way but this only matters for N32.
  unsigned SlotSizeInBits = IsO32 ? 32 : 64;
  unsigned PtrWidth = getTarget().getPointerWidth(0);
  bool DidPromote = false;
  if ((Ty->isIntegerType() &&
          getContext().getIntWidth(Ty) < SlotSizeInBits) ||
      (Ty->isPointerType() && PtrWidth < SlotSizeInBits)) {
    DidPromote = true;
    Ty = getContext().getIntTypeForBitwidth(SlotSizeInBits,
                                            Ty->isSignedIntegerType());
  }

  auto TyInfo = getContext().getTypeInfoInChars(Ty);

  // The alignment of things in the argument area is never larger than
  // StackAlignInBytes.
  TyInfo.second =
      std::min(TyInfo.second, CharUnits::fromQuantity(StackAlignInBytes));

  // MinABIStackAlignInBytes is the size of argument slots on the stack.
  CharUnits ArgSlotSize = CharUnits::fromQuantity(MinABIStackAlignInBytes);

  Address Addr = emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
                                  TyInfo, ArgSlotSize,
                                  /*AllowHigherAlign*/ true);

  // If there was a promotion, "unpromote" into a temporary.
  // TODO: can we just use a pointer into a subset of the original slot?
  if (DidPromote) {
    Address Temp = CGF.CreateMemTemp(OrigTy, "vaarg.promotion-temp");
    llvm::Value *Promoted = CGF.Builder.CreateLoad(Addr);

    // Truncate down to the right width.
    llvm::Type *IntTy = (OrigTy->isIntegerType() ? Temp.getElementType()
                                                 : CGF.IntPtrTy);
    llvm::Value *V = CGF.Builder.CreateTrunc(Promoted, IntTy);
    if (OrigTy->isPointerType())
      V = CGF.Builder.CreateIntToPtr(V, Temp.getElementType());

    CGF.Builder.CreateStore(V, Temp);
    Addr = Temp;
  }

  return Addr;
}
bool MipsABIInfo::shouldSignExtUnsignedType(QualType Ty) const {
  int TySize = getContext().getTypeSize(Ty);

  // The MIPS64 ABI requires unsigned 32-bit integers to be sign-extended.
  if (Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
    return true;

  return false;
}
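// For illustration: shouldSignExtUnsignedType() returning true means a 32-bit
// 'unsigned int' value is widened to 64 bits by sign extension rather than
// zero extension, as the MIPS64 ABI requires.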
bool
MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                               llvm::Value *Address) const {
  // This information comes from gcc's implementation, which seems to be as
  // canonical as it gets.

  // Everything on MIPS is 4 bytes. Double-precision FP registers
  // are aliased to pairs of single-precision FP registers.
  llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

  // 0-31 are the general purpose registers, $0 - $31.
  // 32-63 are the floating-point registers, $f0 - $f31.
  // 64 and 65 are the multiply/divide registers, $hi and $lo.
  // 66 is the (notional, I think) register for signal-handler return.
  AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);

  // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
  // They are one bit wide and ignored here.

  // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
  // (coprocessor 1 is the FP unit)
  // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
  // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
  // 176-181 are the DSP accumulator registers.
  AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);
  return false;
}
//===----------------------------------------------------------------------===//
// AVR ABI Implementation.
//===----------------------------------------------------------------------===//

namespace {
class AVRTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  AVRTargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new DefaultABIInfo(CGT)) { }

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD) return;
    auto *Fn = cast<llvm::Function>(GV);

    if (FD->getAttr<AVRInterruptAttr>())
      Fn->addFnAttr("interrupt");

    if (FD->getAttr<AVRSignalAttr>())
      Fn->addFnAttr("signal");
  }
};
}
//===----------------------------------------------------------------------===//
// TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
// Currently subclassed only to implement custom OpenCL C function attribute
// handling.
//===----------------------------------------------------------------------===//

namespace {

class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo {
public:
  TCETargetCodeGenInfo(CodeGenTypes &CGT)
    : DefaultTargetCodeGenInfo(CGT) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
};

void TCETargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
  if (!FD) return;

  llvm::Function *F = cast<llvm::Function>(GV);

  if (M.getLangOpts().OpenCL) {
    if (FD->hasAttr<OpenCLKernelAttr>()) {
      // OpenCL C kernel functions are not subject to inlining.
      F->addFnAttr(llvm::Attribute::NoInline);
      const ReqdWorkGroupSizeAttr *Attr = FD->getAttr<ReqdWorkGroupSizeAttr>();
      if (Attr) {
        // Convert the reqd_work_group_size() attributes to metadata.
        llvm::LLVMContext &Context = F->getContext();
        llvm::NamedMDNode *OpenCLMetadata =
            M.getModule().getOrInsertNamedMetadata(
                "opencl.kernel_wg_size_info");

        SmallVector<llvm::Metadata *, 5> Operands;
        Operands.push_back(llvm::ConstantAsMetadata::get(F));

        Operands.push_back(
            llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
                M.Int32Ty, llvm::APInt(32, Attr->getXDim()))));
        Operands.push_back(
            llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
                M.Int32Ty, llvm::APInt(32, Attr->getYDim()))));
        Operands.push_back(
            llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
                M.Int32Ty, llvm::APInt(32, Attr->getZDim()))));

        // Add a boolean constant operand for "required" (true) or "hint"
        // (false) for implementing the work_group_size_hint attr later.
        // Currently always true as the hint is not yet implemented.
        Operands.push_back(
            llvm::ConstantAsMetadata::get(llvm::ConstantInt::getTrue(Context)));
        OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
      }
    }
  }
}

}
//===----------------------------------------------------------------------===//
// Hexagon ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class HexagonABIInfo : public ABIInfo {
public:
  HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

private:
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  void computeInfo(CGFunctionInfo &FI) const override;

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
};

class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  HexagonTargetCodeGenInfo(CodeGenTypes &CGT)
    :TargetCodeGenInfo(new HexagonABIInfo(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 29;
  }
};

} // end anonymous namespace
void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type);
}
ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const {
  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  // Ignore empty records.
  if (isEmptyRecord(getContext(), Ty, true))
    return ABIArgInfo::getIgnore();

  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
    return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

  uint64_t Size = getContext().getTypeSize(Ty);
  if (Size > 64)
    return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
  // Pass in the smallest viable integer type.
  else if (Size > 32)
    return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
  else if (Size > 16)
    return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
  else if (Size > 8)
    return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
  else
    return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
}
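// For illustration: a 6-byte struct (48 bits) takes the 'Size > 32' branch and
// is passed as a single i64; a 3-byte struct becomes an i32; anything larger
// than 64 bits is passed indirectly via the path above.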
ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // Large vector types should be returned via memory.
  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64)
    return getNaturalAlignIndirect(RetTy);

  if (!isAggregateTypeForABI(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    return (RetTy->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  if (isEmptyRecord(getContext(), RetTy, true))
    return ABIArgInfo::getIgnore();

  // Aggregates <= 8 bytes are returned in r0; other aggregates
  // are returned indirectly.
  uint64_t Size = getContext().getTypeSize(RetTy);
  if (Size <= 64) {
    // Return in the smallest viable integer type.
    if (Size <= 8)
      return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
    if (Size <= 16)
      return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
    if (Size <= 32)
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
    return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
  }

  return getNaturalAlignIndirect(RetTy, /*ByVal=*/true);
}
Address HexagonABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                  QualType Ty) const {
  // FIXME: Someone needs to audit that this handles alignment correctly.
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
                          getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(4),
                          /*AllowHigherAlign*/ true);
}
//===----------------------------------------------------------------------===//
// Lanai ABI Implementation
//===----------------------------------------------------------------------===//

namespace {
class LanaiABIInfo : public DefaultABIInfo {
public:
  LanaiABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}

  bool shouldUseInReg(QualType Ty, CCState &State) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    CCState State(FI.getCallingConvention());
    // Lanai uses 4 registers to pass arguments unless the function has the
    // regparm attribute set.
    if (FI.getHasRegParm()) {
      State.FreeRegs = FI.getRegParm();
    } else {
      State.FreeRegs = 4;
    }

    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type, State);
  }

  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const;
};
} // end anonymous namespace
bool LanaiABIInfo::shouldUseInReg(QualType Ty, CCState &State) const {
  unsigned Size = getContext().getTypeSize(Ty);
  unsigned SizeInRegs = llvm::alignTo(Size, 32U) / 32U;

  if (SizeInRegs == 0)
    return false;

  if (SizeInRegs > State.FreeRegs) {
    State.FreeRegs = 0;
    return false;
  }

  State.FreeRegs -= SizeInRegs;

  return true;
}
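// For illustration: with the default 4 free registers, an 'int64_t' argument
// consumes two of them; once FreeRegs reaches 0, subsequent scalar arguments
// get plain (non-inreg) treatment.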
ABIArgInfo LanaiABIInfo::getIndirectResult(QualType Ty, bool ByVal,
                                           CCState &State) const {
  if (!ByVal) {
    if (State.FreeRegs) {
      --State.FreeRegs; // Non-byval indirects just use one pointer.
      return getNaturalAlignIndirectInReg(Ty);
    }
    return getNaturalAlignIndirect(Ty, false);
  }

  // Compute the byval alignment.
  const unsigned MinABIStackAlignInBytes = 4;
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true,
                                 /*Realign=*/TypeAlign >
                                     MinABIStackAlignInBytes);
}
ABIArgInfo LanaiABIInfo::classifyArgumentType(QualType Ty,
                                              CCState &State) const {
  // Check with the C++ ABI first.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (RT) {
    CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
    if (RAA == CGCXXABI::RAA_Indirect) {
      return getIndirectResult(Ty, /*ByVal=*/false, State);
    } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
      return getNaturalAlignIndirect(Ty, /*ByRef=*/true);
    }
  }

  if (isAggregateTypeForABI(Ty)) {
    // Structures with flexible arrays are always indirect.
    if (RT && RT->getDecl()->hasFlexibleArrayMember())
      return getIndirectResult(Ty, /*ByVal=*/true, State);

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();

    llvm::LLVMContext &LLVMContext = getVMContext();
    unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
    if (SizeInRegs <= State.FreeRegs) {
      llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
      SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32);
      llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
      State.FreeRegs -= SizeInRegs;
      return ABIArgInfo::getDirectInReg(Result);
    } else {
      State.FreeRegs = 0;
    }
    return getIndirectResult(Ty, true, State);
  }

  // Treat an enum type as its underlying type.
  if (const auto *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  bool InReg = shouldUseInReg(Ty, State);
  if (Ty->isPromotableIntegerType()) {
    if (InReg)
      return ABIArgInfo::getDirectInReg();
    return ABIArgInfo::getExtend();
  }
  if (InReg)
    return ABIArgInfo::getDirectInReg();
  return ABIArgInfo::getDirect();
}
namespace {
class LanaiTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  LanaiTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(new LanaiABIInfo(CGT)) {}
};
}
//===----------------------------------------------------------------------===//
// AMDGPU ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class AMDGPUABIInfo final : public DefaultABIInfo {
public:
  explicit AMDGPUABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}

private:
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override;
};

void AMDGPUABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

  unsigned CC = FI.getCallingConvention();
  for (auto &Arg : FI.arguments())
    if (CC == llvm::CallingConv::AMDGPU_KERNEL)
      Arg.info = classifyArgumentType(Arg.type);
    else
      Arg.info = DefaultABIInfo::classifyArgumentType(Arg.type);
}
/// \brief Classify argument of given type \p Ty.
ABIArgInfo AMDGPUABIInfo::classifyArgumentType(QualType Ty) const {
  llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty));
  if (!StrTy) {
    return DefaultABIInfo::classifyArgumentType(Ty);
  }

  // Coerce single element structs to their element.
  if (StrTy->getNumElements() == 1) {
    return ABIArgInfo::getDirect();
  }

  // If we set CanBeFlattened to true, CodeGen will expand the struct to its
  // individual elements, which confuses the Clover OpenCL backend; therefore
  // we have to set it to false here. Other args of getDirect() are just
  // defaults.
  return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
}
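// For illustration: a kernel argument of type 'struct { int x; float y; }' is
// kept as a single two-element struct in the IR (CanBeFlattened == false)
// instead of being expanded into separate i32 and float arguments.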
class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new AMDGPUABIInfo(CGT)) {}
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
  unsigned getOpenCLKernelCallingConv() const override;

  llvm::Constant *getNullPointer(const CodeGen::CodeGenModule &CGM,
      llvm::PointerType *T, QualType QT) const override;
};

}

static void appendOpenCLVersionMD(CodeGen::CodeGenModule &CGM);
void AMDGPUTargetCodeGenInfo::setTargetAttributes(
    const Decl *D,
    llvm::GlobalValue *GV,
    CodeGen::CodeGenModule &M) const {
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
  if (!FD)
    return;

  llvm::Function *F = cast<llvm::Function>(GV);

  const auto *ReqdWGS = M.getLangOpts().OpenCL ?
    FD->getAttr<ReqdWorkGroupSizeAttr>() : nullptr;
  const auto *FlatWGS = FD->getAttr<AMDGPUFlatWorkGroupSizeAttr>();
  if (ReqdWGS || FlatWGS) {
    unsigned Min = FlatWGS ? FlatWGS->getMin() : 0;
    unsigned Max = FlatWGS ? FlatWGS->getMax() : 0;
    if (ReqdWGS && Min == 0 && Max == 0)
      Min = Max = ReqdWGS->getXDim() * ReqdWGS->getYDim() * ReqdWGS->getZDim();

    if (Min != 0) {
      assert(Min <= Max && "Min must be less than or equal to Max");

      std::string AttrVal = llvm::utostr(Min) + "," + llvm::utostr(Max);
      F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);
    } else
      assert(Max == 0 && "Max must be zero");
  }

  if (const auto *Attr = FD->getAttr<AMDGPUWavesPerEUAttr>()) {
    unsigned Min = Attr->getMin();
    unsigned Max = Attr->getMax();

    if (Min != 0) {
      assert((Max == 0 || Min <= Max) && "Min must be less than or equal to Max");

      std::string AttrVal = llvm::utostr(Min);
      if (Max != 0)
        AttrVal = AttrVal + "," + llvm::utostr(Max);
      F->addFnAttr("amdgpu-waves-per-eu", AttrVal);
    } else
      assert(Max == 0 && "Max must be zero");
  }

  if (const auto *Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) {
    unsigned NumSGPR = Attr->getNumSGPR();

    if (NumSGPR != 0)
      F->addFnAttr("amdgpu-num-sgpr", llvm::utostr(NumSGPR));
  }

  if (const auto *Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) {
    uint32_t NumVGPR = Attr->getNumVGPR();

    if (NumVGPR != 0)
      F->addFnAttr("amdgpu-num-vgpr", llvm::utostr(NumVGPR));
  }

  appendOpenCLVersionMD(M);
}
unsigned AMDGPUTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
  return llvm::CallingConv::AMDGPU_KERNEL;
}
// Currently LLVM assumes null pointers always have value 0, which results in
// incorrectly transformed IR. Therefore, instead of emitting null pointers in
// the private and local address spaces, a null pointer in the generic address
// space is emitted which is then cast to a pointer in the local or private
// address space.
llvm::Constant *AMDGPUTargetCodeGenInfo::getNullPointer(
    const CodeGen::CodeGenModule &CGM, llvm::PointerType *PT,
    QualType QT) const {
  if (CGM.getContext().getTargetNullPointerValue(QT) == 0)
    return llvm::ConstantPointerNull::get(PT);

  auto &Ctx = CGM.getContext();
  auto NPT = llvm::PointerType::get(PT->getElementType(),
      Ctx.getTargetAddressSpace(LangAS::opencl_generic));
  return llvm::ConstantExpr::getAddrSpaceCast(
      llvm::ConstantPointerNull::get(NPT), PT);
}
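// For illustration: an OpenCL '(local int *)0' whose target null value is not
// the bit pattern 0 is emitted as an addrspacecast of the generic-address-space
// null pointer to the local address space, rather than as a plain 'null'.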
//===----------------------------------------------------------------------===//
// SPARC v8 ABI Implementation.
// Based on the SPARC Compliance Definition version 2.4.1.
//
// Ensures that complex values are passed in registers.
//
namespace {
class SparcV8ABIInfo : public DefaultABIInfo {
public:
  SparcV8ABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}

private:
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  void computeInfo(CGFunctionInfo &FI) const override;
};
} // end anonymous namespace

ABIArgInfo
SparcV8ABIInfo::classifyReturnType(QualType Ty) const {
  if (Ty->isAnyComplexType()) {
    return ABIArgInfo::getDirect();
  }
  return DefaultABIInfo::classifyReturnType(Ty);
}

void SparcV8ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  for (auto &Arg : FI.arguments())
    Arg.info = classifyArgumentType(Arg.type);
}

namespace {
class SparcV8TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SparcV8TargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new SparcV8ABIInfo(CGT)) {}
};
} // end anonymous namespace
//===----------------------------------------------------------------------===//
// SPARC v9 ABI Implementation.
// Based on the SPARC Compliance Definition version 2.4.1.
//
// Function arguments are mapped to a nominal "parameter array" and promoted to
// registers depending on their type. Each argument occupies 8 or 16 bytes in
// the array; structs larger than 16 bytes are passed indirectly.
//
// One case requires special care:
//
//   struct mixed {
//     int i;
//     float f;
//   };
//
// When a struct mixed is passed by value, it only occupies 8 bytes in the
// parameter array, but the int is passed in an integer register, and the float
// is passed in a floating point register. This is represented as two arguments
// with the LLVM IR inreg attribute:
//
//   declare void f(i32 inreg %i, float inreg %f)
//
// The code generator will only allocate 4 bytes from the parameter array for
// the inreg arguments. All other arguments are allocated a multiple of 8
// bytes.
//
namespace {
class SparcV9ABIInfo : public ABIInfo {
public:
  SparcV9ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

private:
  ABIArgInfo classifyType(QualType RetTy, unsigned SizeLimit) const;
  void computeInfo(CGFunctionInfo &FI) const override;
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  // Coercion type builder for structs passed in registers. The coercion type
  // serves two purposes:
  //
  // 1. Pad structs to a multiple of 64 bits, so they are passed 'left-aligned'
  //    in registers.
  // 2. Expose aligned floating point elements as first-level elements, so the
  //    code generator knows to pass them in floating point registers.
  //
  // We also compute the InReg flag which indicates that the struct contains
  // aligned 32-bit floats.
  //
  struct CoerceBuilder {
    llvm::LLVMContext &Context;
    const llvm::DataLayout &DL;
    SmallVector<llvm::Type*, 8> Elems;
    uint64_t Size;
    bool InReg;

    CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl)
      : Context(c), DL(dl), Size(0), InReg(false) {}

    // Pad Elems with integers until Size is ToSize.
    void pad(uint64_t ToSize) {
      assert(ToSize >= Size && "Cannot remove elements");
      if (ToSize == Size)
        return;

      // Finish the current 64-bit word.
      uint64_t Aligned = llvm::alignTo(Size, 64);
      if (Aligned > Size && Aligned <= ToSize) {
        Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size));
        Size = Aligned;
      }

      // Add whole 64-bit words.
      while (Size + 64 <= ToSize) {
        Elems.push_back(llvm::Type::getInt64Ty(Context));
        Size += 64;
      }

      // Final in-word padding.
      if (Size < ToSize) {
        Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size));
        Size = ToSize;
      }
    }

    // Add a floating point element at Offset.
    void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) {
      // Unaligned floats are treated as integers.
      if (Offset % Bits)
        return;
      // The InReg flag is only required if there are any floats < 64 bits.
      if (Bits < 64)
        InReg = true;
      pad(Offset);
      Elems.push_back(Ty);
      Size = Offset + Bits;
    }
7517 void addStruct(uint64_t Offset, llvm::StructType *StrTy) {
7518 const llvm::StructLayout *Layout = DL.getStructLayout(StrTy);
7519 for (unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) {
7520 llvm::Type *ElemTy = StrTy->getElementType(i);
7521 uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i);
7522 switch (ElemTy->getTypeID()) {
7523 case llvm::Type::StructTyID:
7524 addStruct(ElemOffset, cast<llvm::StructType>(ElemTy));
7526 case llvm::Type::FloatTyID:
7527 addFloat(ElemOffset, ElemTy, 32);
7529 case llvm::Type::DoubleTyID:
7530 addFloat(ElemOffset, ElemTy, 64);
7532 case llvm::Type::FP128TyID:
7533 addFloat(ElemOffset, ElemTy, 128);
7535 case llvm::Type::PointerTyID:
7536 if (ElemOffset % 64 == 0) {
7538 Elems.push_back(ElemTy);
    // Check if Ty is a usable substitute for the coercion type.
    bool isUsableType(llvm::StructType *Ty) const {
      return llvm::makeArrayRef(Elems) == Ty->elements();
    }

    // Get the coercion type as a literal struct type.
    llvm::Type *getType() const {
      if (Elems.size() == 1)
        return Elems.front();
      return llvm::StructType::get(Context, Elems);
    }
  };
};
} // end anonymous namespace
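// For illustration: running CoerceBuilder over 'struct mixed' from the comment
// above yields Elems == { i32, float } with InReg set (the float is 32 bits
// wide and 32-bit aligned), which classifyType() below turns into a
// getDirectInReg coercion.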
ABIArgInfo
SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
  if (Ty->isVoidType())
    return ABIArgInfo::getIgnore();

  uint64_t Size = getContext().getTypeSize(Ty);

  // Anything too big to fit in registers is passed with an explicit indirect
  // pointer / sret pointer.
  if (Size > SizeLimit)
    return getNaturalAlignIndirect(Ty, /*ByVal=*/false);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // Integer types smaller than a register are extended.
  if (Size < 64 && Ty->isIntegerType())
    return ABIArgInfo::getExtend();

  // Other non-aggregates go in registers.
  if (!isAggregateTypeForABI(Ty))
    return ABIArgInfo::getDirect();

  // If a C++ object has either a non-trivial copy constructor or a non-trivial
  // destructor, it is passed with an explicit indirect pointer / sret pointer.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
    return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

  // This is a small aggregate type that should be passed in registers.
  // Build a coercion type from the LLVM struct type.
  llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty));
  if (!StrTy)
    return ABIArgInfo::getDirect();

  CoerceBuilder CB(getVMContext(), getDataLayout());
  CB.addStruct(0, StrTy);
  CB.pad(llvm::alignTo(CB.DL.getTypeSizeInBits(StrTy), 64));

  // Try to use the original type for coercion.
  llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType();

  if (CB.InReg)
    return ABIArgInfo::getDirectInReg(CoerceTy);
  return ABIArgInfo::getDirect(CoerceTy);
}
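// For illustration: with the 16-byte argument SizeLimit, a 'short' argument is
// classified getExtend, a 64-bit 'long' is getDirect, and a 20-byte struct is
// passed indirectly.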
Address SparcV9ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                  QualType Ty) const {
  ABIArgInfo AI = classifyType(Ty, 16 * 8);
  llvm::Type *ArgTy = CGT.ConvertType(Ty);
  if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
    AI.setCoerceToType(ArgTy);

  CharUnits SlotSize = CharUnits::fromQuantity(8);

  CGBuilderTy &Builder = CGF.Builder;
  Address Addr(Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize);
  llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);

  auto TypeInfo = getContext().getTypeInfoInChars(Ty);

  Address ArgAddr = Address::invalid();
  CharUnits Stride;
  switch (AI.getKind()) {
  case ABIArgInfo::Expand:
  case ABIArgInfo::CoerceAndExpand:
  case ABIArgInfo::InAlloca:
    llvm_unreachable("Unsupported ABI kind for va_arg");

  case ABIArgInfo::Extend: {
    Stride = SlotSize;
    CharUnits Offset = SlotSize - TypeInfo.first;
    ArgAddr = Builder.CreateConstInBoundsByteGEP(Addr, Offset, "extend");
    break;
  }

  case ABIArgInfo::Direct: {
    auto AllocSize = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
    Stride = CharUnits::fromQuantity(AllocSize).alignTo(SlotSize);
    ArgAddr = Addr;
    break;
  }

  case ABIArgInfo::Indirect:
    Stride = SlotSize;
    ArgAddr = Builder.CreateElementBitCast(Addr, ArgPtrTy, "indirect");
    ArgAddr = Address(Builder.CreateLoad(ArgAddr, "indirect.arg"),
                      TypeInfo.second);
    break;

  case ABIArgInfo::Ignore:
    return Address(llvm::UndefValue::get(ArgPtrTy), TypeInfo.second);
  }

  // Update VAList.
  llvm::Value *NextPtr =
      Builder.CreateConstInBoundsByteGEP(Addr.getPointer(), Stride, "ap.next");
  Builder.CreateStore(NextPtr, VAListAddr);

  return Builder.CreateBitCast(ArgAddr, ArgPtrTy, "arg.addr");
}
void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8);
  for (auto &I : FI.arguments())
    I.info = classifyType(I.type, 16 * 8);
}

namespace {
class SparcV9TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SparcV9TargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new SparcV9ABIInfo(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 14;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;
};
} // end anonymous namespace
bool
SparcV9TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                                  llvm::Value *Address) const {
  // This is calculated from the LLVM and GCC tables and verified
  // against gcc output. AFAIK all ABIs use the same encoding.

  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  llvm::IntegerType *i8 = CGF.Int8Ty;
  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
  llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);

  // 0-31: the 8-byte general-purpose registers
  AssignToArrayRange(Builder, Address, Eight8, 0, 31);

  // 32-63: f0-31, the 4-byte floating-point registers
  AssignToArrayRange(Builder, Address, Four8, 32, 63);

  //   Y   = 64
  //   PSR = 65
  //   WIM = 66
  //   TBR = 67
  //   PC  = 68
  //   NPC = 69
  //   FSR = 70
  //   CSR = 71
  AssignToArrayRange(Builder, Address, Eight8, 64, 71);

  // 72-87: d0-15, the 8-byte floating-point registers
  AssignToArrayRange(Builder, Address, Eight8, 72, 87);

  return false;
}
//===----------------------------------------------------------------------===//
// XCore ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

/// A SmallStringEnc instance is used to build up the TypeString by passing
/// it by reference between functions that append to it.
typedef llvm::SmallString<128> SmallStringEnc;

/// TypeStringCache caches the meta encodings of types.
///
/// The reason for caching TypeStrings is twofold:
///   1. To cache a type's encoding for later uses;
///   2. As a means to break recursive member type inclusion.
///
/// A cache Entry can have a Status of:
///   NonRecursive:   The type encoding is not recursive;
///   Recursive:      The type encoding is recursive;
///   Incomplete:     An incomplete TypeString;
///   IncompleteUsed: An incomplete TypeString that has been used in a
///                   Recursive type encoding.
///
/// A NonRecursive entry will have all of its sub-members expanded as fully
/// as possible. Whilst it may contain types which are recursive, the type
/// itself is not recursive and thus its encoding may be safely used whenever
/// the type is encountered.
///
/// A Recursive entry will have all of its sub-members expanded as fully as
/// possible. The type itself is recursive and it may contain other types which
/// are recursive. The Recursive encoding must not be used during the expansion
/// of a recursive type's recursive branch. For simplicity the code uses
/// IncompleteCount to reject all usage of Recursive encodings for member types.
///
/// An Incomplete entry is always a RecordType and only encodes its
/// identifier, e.g. "s(S){}". Incomplete 'StubEnc' entries are ephemeral and
/// are placed into the cache during type expansion as a means to identify and
/// handle recursive inclusion of types as sub-members. If there is recursion
/// the entry becomes IncompleteUsed.
///
/// During the expansion of a RecordType's members:
///
///   If the cache contains a NonRecursive encoding for the member type, the
///   cached encoding is used;
///
///   If the cache contains a Recursive encoding for the member type, the
///   cached encoding is 'Swapped' out, as it may be incorrect, and...
///
///   If the member is a RecordType, an Incomplete encoding is placed into the
///   cache to break potential recursive inclusion of itself as a sub-member;
///
///   Once a member RecordType has been expanded, its temporary incomplete
///   entry is removed from the cache. If a Recursive encoding was swapped out
///   it is swapped back in;
///
///   If an incomplete entry is used to expand a sub-member, the incomplete
///   entry is marked as IncompleteUsed. The cache keeps count of how many
///   IncompleteUsed entries it currently contains in IncompleteUsedCount;
///
///   If a member's encoding is found to be NonRecursive or Recursive, viz:
///   IncompleteUsedCount == 0, the member's encoding is added to the cache.
///   Else the member is part of a recursive type and thus the recursion has
///   been exited too soon for the encoding to be correct for the member.
///
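/// For illustration: encoding 'struct S { struct S *n; };' first caches the
/// Incomplete stub "s(S){}"; expanding the pointer member then looks up that
/// stub (marking it IncompleteUsed), and the final cached encoding for S is
/// flagged Recursive.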
class TypeStringCache {
  enum Status {NonRecursive, Recursive, Incomplete, IncompleteUsed};
  struct Entry {
    std::string Str;     // The encoded TypeString for the type.
    enum Status State;   // Information about the encoding in 'Str'.
    std::string Swapped; // A temporary place holder for a Recursive encoding
                         // during the expansion of RecordType's members.
  };
  std::map<const IdentifierInfo *, struct Entry> Map;
  unsigned IncompleteCount;     // Number of Incomplete entries in the Map.
  unsigned IncompleteUsedCount; // Number of IncompleteUsed entries in the Map.
public:
  TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {}
  void addIncomplete(const IdentifierInfo *ID, std::string StubEnc);
  bool removeIncomplete(const IdentifierInfo *ID);
  void addIfComplete(const IdentifierInfo *ID, StringRef Str,
                     bool IsRecursive);
  StringRef lookupStr(const IdentifierInfo *ID);
};
/// TypeString encodings for enum & union fields must be ordered.
/// FieldEncoding is a helper for this ordering process.
class FieldEncoding {
  bool HasName;
  std::string Enc;
public:
  FieldEncoding(bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {}
  StringRef str() { return Enc; }
  bool operator<(const FieldEncoding &rhs) const {
    if (HasName != rhs.HasName) return HasName;
    return Enc < rhs.Enc;
  }
};
class XCoreABIInfo : public DefaultABIInfo {
public:
  XCoreABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
};

class XCoreTargetCodeGenInfo : public TargetCodeGenInfo {
  mutable TypeStringCache TSC;
public:
  XCoreTargetCodeGenInfo(CodeGenTypes &CGT)
    :TargetCodeGenInfo(new XCoreABIInfo(CGT)) {}
  void emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
                    CodeGen::CodeGenModule &M) const override;
};

} // End anonymous namespace.
// TODO: this implementation is likely now redundant with the default
// EmitVAArg.
Address XCoreABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty) const {
  CGBuilderTy &Builder = CGF.Builder;

  // Get the VAList.
  CharUnits SlotSize = CharUnits::fromQuantity(4);
  Address AP(Builder.CreateLoad(VAListAddr), SlotSize);

  // Handle the argument.
  ABIArgInfo AI = classifyArgumentType(Ty);
  CharUnits TypeAlign = getContext().getTypeAlignInChars(Ty);
  llvm::Type *ArgTy = CGT.ConvertType(Ty);
  if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
    AI.setCoerceToType(ArgTy);
  llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);

  Address Val = Address::invalid();
  CharUnits ArgSize = CharUnits::Zero();
  switch (AI.getKind()) {
  case ABIArgInfo::Expand:
  case ABIArgInfo::CoerceAndExpand:
  case ABIArgInfo::InAlloca:
    llvm_unreachable("Unsupported ABI kind for va_arg");
  case ABIArgInfo::Ignore:
    Val = Address(llvm::UndefValue::get(ArgPtrTy), TypeAlign);
    ArgSize = CharUnits::Zero();
    break;
  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    Val = Builder.CreateBitCast(AP, ArgPtrTy);
    ArgSize = CharUnits::fromQuantity(
        getDataLayout().getTypeAllocSize(AI.getCoerceToType()));
    ArgSize = ArgSize.alignTo(SlotSize);
    break;
  case ABIArgInfo::Indirect:
    Val = Builder.CreateElementBitCast(AP, ArgPtrTy);
    Val = Address(Builder.CreateLoad(Val), TypeAlign);
    ArgSize = SlotSize;
    break;
  }

  // Increment the VAList.
  if (!ArgSize.isZero()) {
    llvm::Value *APN =
        Builder.CreateConstInBoundsByteGEP(AP.getPointer(), ArgSize);
    Builder.CreateStore(APN, VAListAddr);
  }

  return Val;
}
/// During the expansion of a RecordType, an incomplete TypeString is placed
/// into the cache as a means to identify and break recursion.
/// If there is a Recursive encoding in the cache, it is swapped out and will
/// be reinserted by removeIncomplete().
/// All other types of encoding should have been used rather than arriving here.
void TypeStringCache::addIncomplete(const IdentifierInfo *ID,
                                    std::string StubEnc) {
  if (!ID)
    return;
  Entry &E = Map[ID];
  assert((E.Str.empty() || E.State == Recursive) &&
         "Incorrect use of addIncomplete");
  assert(!StubEnc.empty() && "Passing an empty string to addIncomplete()");
  E.Swapped.swap(E.Str); // swap out the Recursive
  E.Str.swap(StubEnc);
  E.State = Incomplete;
  ++IncompleteCount;
}
/// Once the RecordType has been expanded, the temporary incomplete TypeString
/// must be removed from the cache.
/// If a Recursive was swapped out by addIncomplete(), it will be replaced.
/// Returns true if the RecordType was defined recursively.
bool TypeStringCache::removeIncomplete(const IdentifierInfo *ID) {
  if (!ID)
    return false;
  auto I = Map.find(ID);
  assert(I != Map.end() && "Entry not present");
  Entry &E = I->second;
  assert((E.State == Incomplete ||
          E.State == IncompleteUsed) &&
         "Entry must be an incomplete type");
  bool IsRecursive = false;
  if (E.State == IncompleteUsed) {
    // We made use of our Incomplete encoding, thus we are recursive.
    IsRecursive = true;
    --IncompleteUsedCount;
  }
  if (E.Swapped.empty())
    Map.erase(I);
  else {
    // Swap the Recursive back.
    E.Swapped.swap(E.Str);
    E.Swapped.clear();
    E.State = Recursive;
  }
  --IncompleteCount;
  return IsRecursive;
}
/// Add the encoded TypeString to the cache only if it is NonRecursive or
/// Recursive (viz: all sub-members were expanded as fully as possible).
void TypeStringCache::addIfComplete(const IdentifierInfo *ID, StringRef Str,
                                    bool IsRecursive) {
  if (!ID || IncompleteUsedCount)
    return; // No key or it is an incomplete sub-type so don't add.
  Entry &E = Map[ID];
  if (IsRecursive && !E.Str.empty()) {
    assert(E.State == Recursive && E.Str.size() == Str.size() &&
           "This is not the same Recursive entry");
    // The parent container was not recursive after all, so we could have
    // used this Recursive sub-member entry, but we assumed the worst when
    // we started, viz: IncompleteCount != 0.
    return;
  }
  assert(E.Str.empty() && "Entry already present");
  E.Str = Str.str();
  E.State = IsRecursive ? Recursive : NonRecursive;
}
/// Return a cached TypeString encoding for the ID. If there isn't one, or we
/// are recursively expanding a type (IncompleteCount != 0) and the cached
/// encoding is Recursive, return an empty StringRef.
StringRef TypeStringCache::lookupStr(const IdentifierInfo *ID) {
  if (!ID)
    return StringRef(); // We have no key.
  auto I = Map.find(ID);
  if (I == Map.end())
    return StringRef(); // We have no encoding.
  Entry &E = I->second;
  if (E.State == Recursive && IncompleteCount)
    return StringRef(); // We don't use Recursive encodings for member types.

  if (E.State == Incomplete) {
    // The incomplete type is being used to break out of recursion.
    E.State = IncompleteUsed;
    ++IncompleteUsedCount;
  }
  return E.Str;
}
/// The XCore ABI includes a type information section that communicates symbol
/// type information to the linker. The linker uses this information to verify
/// safety/correctness of things such as array bounds and pointers et al.
/// The ABI only requires C (and XC) language modules to emit TypeStrings.
/// This type information (TypeString) is emitted into meta data for all global
/// symbols: definitions, declarations, functions & variables.
///
/// The TypeString carries type, qualifier, name, size & value details.
/// Please see 'Tools Development Guide' section 2.16.2 for format details:
/// https://www.xmos.com/download/public/Tools-Development-Guide%28X9114A%29.pdf
/// The output is tested by test/CodeGen/xcore-stringtype.c.
///
static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
                          CodeGen::CodeGenModule &CGM, TypeStringCache &TSC);

/// XCore uses emitTargetMD to emit TypeString metadata for global symbols.
void XCoreTargetCodeGenInfo::emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
                                          CodeGen::CodeGenModule &CGM) const {
  SmallStringEnc Enc;
  if (getTypeString(Enc, D, CGM, TSC)) {
    llvm::LLVMContext &Ctx = CGM.getModule().getContext();
    llvm::Metadata *MDVals[] = {llvm::ConstantAsMetadata::get(GV),
                                llvm::MDString::get(Ctx, Enc.str())};
    llvm::NamedMDNode *MD =
        CGM.getModule().getOrInsertNamedMetadata("xcore.typestrings");
    MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
  }
}
//===----------------------------------------------------------------------===//
// SPIR ABI Implementation
//===----------------------------------------------------------------------===//

namespace {
class SPIRTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SPIRTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
  void emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
                    CodeGen::CodeGenModule &M) const override;
  unsigned getOpenCLKernelCallingConv() const override;
};

} // End anonymous namespace.
/// Emit SPIR specific metadata: OpenCL and SPIR version.
void SPIRTargetCodeGenInfo::emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
                                         CodeGen::CodeGenModule &CGM) const {
  llvm::LLVMContext &Ctx = CGM.getModule().getContext();
  llvm::Type *Int32Ty = llvm::Type::getInt32Ty(Ctx);
  llvm::Module &M = CGM.getModule();
  // SPIR v2.0 s2.12 - The SPIR version used by the module is stored in the
  // opencl.spir.version named metadata.
  llvm::Metadata *SPIRVerElts[] = {
      llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
          Int32Ty, CGM.getLangOpts().OpenCLVersion / 100)),
      llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
          Int32Ty, (CGM.getLangOpts().OpenCLVersion / 100 > 1) ? 0 : 2))};
  llvm::NamedMDNode *SPIRVerMD =
      M.getOrInsertNamedMetadata("opencl.spir.version");
  SPIRVerMD->addOperand(llvm::MDNode::get(Ctx, SPIRVerElts));
  appendOpenCLVersionMD(CGM);
}
static void appendOpenCLVersionMD(CodeGen::CodeGenModule &CGM) {
  llvm::LLVMContext &Ctx = CGM.getModule().getContext();
  llvm::Type *Int32Ty = llvm::Type::getInt32Ty(Ctx);
  llvm::Module &M = CGM.getModule();
  // SPIR v2.0 s2.13 - The OpenCL version used by the module is stored in the
  // opencl.ocl.version named metadata node.
  llvm::Metadata *OCLVerElts[] = {
      llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
          Int32Ty, CGM.getLangOpts().OpenCLVersion / 100)),
      llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
          Int32Ty, (CGM.getLangOpts().OpenCLVersion % 100) / 10))};
  llvm::NamedMDNode *OCLVerMD =
      M.getOrInsertNamedMetadata("opencl.ocl.version");
  OCLVerMD->addOperand(llvm::MDNode::get(Ctx, OCLVerElts));
}
unsigned SPIRTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
  return llvm::CallingConv::SPIR_KERNEL;
}

static bool appendType(SmallStringEnc &Enc, QualType QType,
                       const CodeGen::CodeGenModule &CGM,
                       TypeStringCache &TSC);
/// Helper function for appendRecordType().
/// Builds a SmallVector containing the encoded field types in declaration
/// order.
static bool extractFieldType(SmallVectorImpl<FieldEncoding> &FE,
                             const RecordDecl *RD,
                             const CodeGen::CodeGenModule &CGM,
                             TypeStringCache &TSC) {
  for (const auto *Field : RD->fields()) {
    SmallStringEnc Enc;
    Enc += "m(";
    Enc += Field->getName();
    Enc += "){";
    if (Field->isBitField()) {
      Enc += "b(";
      llvm::raw_svector_ostream OS(Enc);
      OS << Field->getBitWidthValue(CGM.getContext());
      Enc += ':';
    }
    if (!appendType(Enc, Field->getType(), CGM, TSC))
      return false;
    if (Field->isBitField())
      Enc += ')';
    Enc += '}';
    FE.emplace_back(!Field->getName().empty(), Enc);
  }
  return true;
}
/// Appends structure and union types to Enc and adds encoding to cache.
/// Recursively calls appendType (via extractFieldType) for each field.
/// Union types have their fields ordered according to the ABI.
static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT,
                             const CodeGen::CodeGenModule &CGM,
                             TypeStringCache &TSC, const IdentifierInfo *ID) {
  // Append the cached TypeString if we have one.
  StringRef TypeString = TSC.lookupStr(ID);
  if (!TypeString.empty()) {
    Enc += TypeString;
    return true;
  }

  // Start to emit an incomplete TypeString.
  size_t Start = Enc.size();
  Enc += (RT->isUnionType()? 'u' : 's');
  Enc += '(';
  if (ID)
    Enc += ID->getName();
  Enc += "){";

  // We collect all encoded fields and order as necessary.
  bool IsRecursive = false;
  const RecordDecl *RD = RT->getDecl()->getDefinition();
  if (RD && !RD->field_empty()) {
    // An incomplete TypeString stub is placed in the cache for this RecordType
    // so that recursive calls to this RecordType will use it whilst building a
    // complete TypeString for this RecordType.
    SmallVector<FieldEncoding, 16> FE;
    std::string StubEnc(Enc.substr(Start).str());
    StubEnc += '}'; // StubEnc now holds a valid incomplete TypeString.
    TSC.addIncomplete(ID, std::move(StubEnc));
    if (!extractFieldType(FE, RD, CGM, TSC)) {
      (void) TSC.removeIncomplete(ID);
      return false;
    }
    IsRecursive = TSC.removeIncomplete(ID);
    // The ABI requires unions to be sorted but not structures.
    // See FieldEncoding::operator< for the sort algorithm.
    if (RT->isUnionType())
      std::sort(FE.begin(), FE.end());
    // We can now complete the TypeString.
    unsigned E = FE.size();
    for (unsigned I = 0; I != E; ++I) {
      if (I)
        Enc += ',';
      Enc += FE[I].str();
    }
  }
  Enc += '}';
  TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive);
  return true;
}
/// Appends enum types to Enc and adds the encoding to the cache.
static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET,
                           TypeStringCache &TSC,
                           const IdentifierInfo *ID) {
  // Append the cached TypeString if we have one.
  StringRef TypeString = TSC.lookupStr(ID);
  if (!TypeString.empty()) {
    Enc += TypeString;
    return true;
  }

  size_t Start = Enc.size();
  Enc += "e(";
  if (ID)
    Enc += ID->getName();
  Enc += "){";

  // We collect all encoded enumerations and order them alphanumerically.
  if (const EnumDecl *ED = ET->getDecl()->getDefinition()) {
    SmallVector<FieldEncoding, 16> FE;
    for (auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E;
         ++I) {
      SmallStringEnc EnumEnc;
      EnumEnc += "m(";
      EnumEnc += I->getName();
      EnumEnc += "){";
      I->getInitVal().toString(EnumEnc);
      EnumEnc += '}';
      FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc));
    }
    std::sort(FE.begin(), FE.end());
    unsigned E = FE.size();
    for (unsigned I = 0; I != E; ++I) {
      if (I)
        Enc += ',';
      Enc += FE[I].str();
    }
  }
  Enc += '}';
  TSC.addIfComplete(ID, Enc.substr(Start), false);
  return true;
}
/// Appends the type's qualifiers to Enc.
/// This is done prior to appending the type's encoding.
static void appendQualifier(SmallStringEnc &Enc, QualType QT) {
  // Qualifiers are emitted in alphabetical order.
  static const char *const Table[] =
      {"", "c:", "r:", "cr:", "v:", "cv:", "rv:", "crv:"};
  int Lookup = 0;
  if (QT.isConstQualified())
    Lookup += 1<<0;
  if (QT.isRestrictQualified())
    Lookup += 1<<1;
  if (QT.isVolatileQualified())
    Lookup += 1<<2;
  Enc += Table[Lookup];
}
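// For illustration: for a 'const volatile int', Lookup ends up as 5 and the
// prefix "cv:" is appended ahead of the int's own encoding.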
/// Appends built-in types to Enc.
static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT) {
  const char *EncType;
  switch (BT->getKind()) {
    case BuiltinType::Void:
      EncType = "0";
      break;
    case BuiltinType::Bool:
      EncType = "b";
      break;
    case BuiltinType::Char_U:
      EncType = "uc";
      break;
    case BuiltinType::UChar:
      EncType = "uc";
      break;
    case BuiltinType::SChar:
      EncType = "sc";
      break;
    case BuiltinType::UShort:
      EncType = "us";
      break;
    case BuiltinType::Short:
      EncType = "ss";
      break;
    case BuiltinType::UInt:
      EncType = "ui";
      break;
    case BuiltinType::Int:
      EncType = "si";
      break;
    case BuiltinType::ULong:
      EncType = "ul";
      break;
    case BuiltinType::Long:
      EncType = "sl";
      break;
    case BuiltinType::ULongLong:
      EncType = "ull";
      break;
    case BuiltinType::LongLong:
      EncType = "sll";
      break;
    case BuiltinType::Float:
      EncType = "ft";
      break;
    case BuiltinType::Double:
      EncType = "d";
      break;
    case BuiltinType::LongDouble:
      EncType = "ld";
      break;
    default:
      return false;
  }
  Enc += EncType;
  return true;
}
/// Appends a pointer encoding to Enc before calling appendType for the pointee.
static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT,
                              const CodeGen::CodeGenModule &CGM,
                              TypeStringCache &TSC) {
  Enc += "p(";
  if (!appendType(Enc, PT->getPointeeType(), CGM, TSC))
    return false;
  Enc += ')';
  return true;
}
/// Appends array encoding to Enc before calling appendType for the element.
static bool appendArrayType(SmallStringEnc &Enc, QualType QT,
                            const ArrayType *AT,
                            const CodeGen::CodeGenModule &CGM,
                            TypeStringCache &TSC, StringRef NoSizeEnc) {
  if (AT->getSizeModifier() != ArrayType::Normal)
    return false;
  Enc += "a(";
  if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT))
    CAT->getSize().toStringUnsigned(Enc);
  else
    Enc += NoSizeEnc; // Global arrays use "*", otherwise it is "".
  Enc += ':';
  // The Qualifiers should be attached to the type rather than the array.
  appendQualifier(Enc, QT);
  if (!appendType(Enc, AT->getElementType(), CGM, TSC))
    return false;
  Enc += ')';
  return true;
}
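// For illustration: 'int a[10]' is encoded as "a(10:" followed by the element
// encoding and ")", while a global array of unknown size receives "*" via the
// NoSizeEnc parameter, e.g. "a(*:...)".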
/// Appends a function encoding to Enc, calling appendType for the return type
/// and the arguments.
static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT,
                               const CodeGen::CodeGenModule &CGM,
                               TypeStringCache &TSC) {
  Enc += "f{";
  if (!appendType(Enc, FT->getReturnType(), CGM, TSC))
    return false;
  Enc += "}(";
  if (const FunctionProtoType *FPT = FT->getAs<FunctionProtoType>()) {
    // N.B. we are only interested in the adjusted param types.
    auto I = FPT->param_type_begin();
    auto E = FPT->param_type_end();
    if (I != E) {
      do {
        if (!appendType(Enc, *I, CGM, TSC))
          return false;
        ++I;
        if (I != E)
          Enc += ',';
      } while (I != E);
      if (FPT->isVariadic())
        Enc += ",va";
    } else {
      if (FPT->isVariadic())
        Enc += "va";
      else
        Enc += "0";
    }
  }
  Enc += ')';
  return true;
}
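// For illustration: 'int f(float, ...)' is encoded as "f{" + <return type> +
// "}(" + <float encoding> + ",va)", and a prototype taking no parameters
// encodes its empty argument list as "(0)".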
/// Handles the type's qualifier before dispatching a call to handle specific
/// type encodings.
static bool appendType(SmallStringEnc &Enc, QualType QType,
                       const CodeGen::CodeGenModule &CGM,
                       TypeStringCache &TSC) {

  QualType QT = QType.getCanonicalType();

  if (const ArrayType *AT = QT->getAsArrayTypeUnsafe())
    // The Qualifiers should be attached to the type rather than the array.
    // Thus we don't call appendQualifier() here.
    return appendArrayType(Enc, QT, AT, CGM, TSC, "");

  appendQualifier(Enc, QT);

  if (const BuiltinType *BT = QT->getAs<BuiltinType>())
    return appendBuiltinType(Enc, BT);

  if (const PointerType *PT = QT->getAs<PointerType>())
    return appendPointerType(Enc, PT, CGM, TSC);

  if (const EnumType *ET = QT->getAs<EnumType>())
    return appendEnumType(Enc, ET, TSC, QT.getBaseTypeIdentifier());

  if (const RecordType *RT = QT->getAsStructureType())
    return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());

  if (const RecordType *RT = QT->getAsUnionType())
    return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());

  if (const FunctionType *FT = QT->getAs<FunctionType>())
    return appendFunctionType(Enc, FT, CGM, TSC);

  return false;
}
static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
                          CodeGen::CodeGenModule &CGM, TypeStringCache &TSC) {
  if (!D)
    return false;

  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->getLanguageLinkage() != CLanguageLinkage)
      return false;
    return appendType(Enc, FD->getType(), CGM, TSC);
  }

  if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
    if (VD->getLanguageLinkage() != CLanguageLinkage)
      return false;
    QualType QT = VD->getType().getCanonicalType();
    if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) {
      // Global ArrayTypes are given a size of '*' if the size is unknown.
      // The Qualifiers should be attached to the type rather than the array.
      // Thus we don't call appendQualifier() here.
      return appendArrayType(Enc, QT, AT, CGM, TSC, "*");
    }
    return appendType(Enc, QT, CGM, TSC);
  }
  return false;
}
//===----------------------------------------------------------------------===//
// Driver code
//===----------------------------------------------------------------------===//

bool CodeGenModule::supportsCOMDAT() const {
  return getTriple().supportsCOMDAT();
}

const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
  if (TheTargetCodeGenInfo)
    return *TheTargetCodeGenInfo;

  // Helper to set the unique_ptr while still keeping the return value.
  auto SetCGInfo = [&](TargetCodeGenInfo *P) -> const TargetCodeGenInfo & {
    this->TheTargetCodeGenInfo.reset(P);
    return *P;
  };

  const llvm::Triple &Triple = getTarget().getTriple();
  switch (Triple.getArch()) {
  default:
    return SetCGInfo(new DefaultTargetCodeGenInfo(Types));

  case llvm::Triple::le32:
    return SetCGInfo(new PNaClTargetCodeGenInfo(Types));
  case llvm::Triple::mips:
  case llvm::Triple::mipsel:
    if (Triple.getOS() == llvm::Triple::NaCl)
      return SetCGInfo(new PNaClTargetCodeGenInfo(Types));
    return SetCGInfo(new MIPSTargetCodeGenInfo(Types, true));

  case llvm::Triple::mips64:
  case llvm::Triple::mips64el:
    return SetCGInfo(new MIPSTargetCodeGenInfo(Types, false));

  case llvm::Triple::avr:
    return SetCGInfo(new AVRTargetCodeGenInfo(Types));

  case llvm::Triple::aarch64:
  case llvm::Triple::aarch64_be: {
    AArch64ABIInfo::ABIKind Kind = AArch64ABIInfo::AAPCS;
    if (getTarget().getABI() == "darwinpcs")
      Kind = AArch64ABIInfo::DarwinPCS;

    return SetCGInfo(new AArch64TargetCodeGenInfo(Types, Kind));
  }

  case llvm::Triple::wasm32:
  case llvm::Triple::wasm64:
    return SetCGInfo(new WebAssemblyTargetCodeGenInfo(Types));

  case llvm::Triple::arm:
  case llvm::Triple::armeb:
  case llvm::Triple::thumb:
  case llvm::Triple::thumbeb: {
    if (Triple.getOS() == llvm::Triple::Win32) {
      return SetCGInfo(
          new WindowsARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS_VFP));
    }

    ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
    StringRef ABIStr = getTarget().getABI();
    if (ABIStr == "apcs-gnu")
      Kind = ARMABIInfo::APCS;
    else if (ABIStr == "aapcs16")
      Kind = ARMABIInfo::AAPCS16_VFP;
    else if (CodeGenOpts.FloatABI == "hard" ||
             (CodeGenOpts.FloatABI != "soft" &&
              (Triple.getEnvironment() == llvm::Triple::GNUEABIHF ||
               Triple.getEnvironment() == llvm::Triple::MuslEABIHF ||
               Triple.getEnvironment() == llvm::Triple::EABIHF)))
      Kind = ARMABIInfo::AAPCS_VFP;

    return SetCGInfo(new ARMTargetCodeGenInfo(Types, Kind));
  }

  case llvm::Triple::ppc:
    return SetCGInfo(
        new PPC32TargetCodeGenInfo(Types, CodeGenOpts.FloatABI == "soft"));
  case llvm::Triple::ppc64:
    if (Triple.isOSBinFormatELF()) {
      PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv1;
      if (getTarget().getABI() == "elfv2")
        Kind = PPC64_SVR4_ABIInfo::ELFv2;
      bool HasQPX = getTarget().getABI() == "elfv1-qpx";
      bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";

      return SetCGInfo(new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX,
                                                        IsSoftFloat));
    }
    return SetCGInfo(new PPC64TargetCodeGenInfo(Types));
  case llvm::Triple::ppc64le: {
    assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!");
    PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv2;
    if (getTarget().getABI() == "elfv1" || getTarget().getABI() == "elfv1-qpx")
      Kind = PPC64_SVR4_ABIInfo::ELFv1;
    bool HasQPX = getTarget().getABI() == "elfv1-qpx";
    bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";

    return SetCGInfo(new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX,
                                                      IsSoftFloat));
  }

  case llvm::Triple::nvptx:
  case llvm::Triple::nvptx64:
    return SetCGInfo(new NVPTXTargetCodeGenInfo(Types));

  case llvm::Triple::msp430:
    return SetCGInfo(new MSP430TargetCodeGenInfo(Types));

  case llvm::Triple::systemz: {
    bool HasVector = getTarget().getABI() == "vector";
    return SetCGInfo(new SystemZTargetCodeGenInfo(Types, HasVector));
  }

  case llvm::Triple::tce:
  case llvm::Triple::tcele:
    return SetCGInfo(new TCETargetCodeGenInfo(Types));

  case llvm::Triple::x86: {
    bool IsDarwinVectorABI = Triple.isOSDarwin();
    bool RetSmallStructInRegABI =
        X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
    bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing();

    if (Triple.getOS() == llvm::Triple::Win32) {
      return SetCGInfo(new WinX86_32TargetCodeGenInfo(
          Types, IsDarwinVectorABI, RetSmallStructInRegABI,
          IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters));
    } else {
      return SetCGInfo(new X86_32TargetCodeGenInfo(
          Types, IsDarwinVectorABI, RetSmallStructInRegABI,
          IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters,
          CodeGenOpts.FloatABI == "soft"));
    }
  }

  case llvm::Triple::x86_64: {
    StringRef ABI = getTarget().getABI();
    X86AVXABILevel AVXLevel =
        (ABI == "avx512"
             ? X86AVXABILevel::AVX512
             : ABI == "avx" ? X86AVXABILevel::AVX : X86AVXABILevel::None);

    switch (Triple.getOS()) {
    case llvm::Triple::Win32:
      return SetCGInfo(new WinX86_64TargetCodeGenInfo(Types, AVXLevel));
    case llvm::Triple::PS4:
      return SetCGInfo(new PS4TargetCodeGenInfo(Types, AVXLevel));
    default:
      return SetCGInfo(new X86_64TargetCodeGenInfo(Types, AVXLevel));
    }
  }
  case llvm::Triple::hexagon:
    return SetCGInfo(new HexagonTargetCodeGenInfo(Types));
  case llvm::Triple::lanai:
    return SetCGInfo(new LanaiTargetCodeGenInfo(Types));
  case llvm::Triple::r600:
    return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types));
  case llvm::Triple::amdgcn:
    return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types));
  case llvm::Triple::sparc:
    return SetCGInfo(new SparcV8TargetCodeGenInfo(Types));
  case llvm::Triple::sparcv9:
    return SetCGInfo(new SparcV9TargetCodeGenInfo(Types));
  case llvm::Triple::xcore:
    return SetCGInfo(new XCoreTargetCodeGenInfo(Types));
  case llvm::Triple::spir:
  case llvm::Triple::spir64:
    return SetCGInfo(new SPIRTargetCodeGenInfo(Types));
  }
}