//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "TargetInfo.h"
#include "ABIInfo.h"
#include "CGCXXABI.h"
#include "CGValue.h"
#include "CodeGenFunction.h"
#include "clang/AST/RecordLayout.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm> // std::sort

using namespace clang;
using namespace CodeGen;

// Helper for coercing an aggregate argument or return value into an integer
// array of the same size (including padding) and alignment. This alternate
// coercion happens only for the RenderScript ABI and can be removed after
// runtimes that rely on it are no longer supported.
//
// RenderScript assumes that the size of the argument / return value in the IR
// is the same as the size of the corresponding qualified type. This helper
// coerces the aggregate type into an array of the same size (including
// padding). This coercion is used in lieu of expansion of struct members or
// other canonical coercions that return a coerced-type of larger size.
//
// Ty          - The argument / return value type
// Context     - The associated ASTContext
// LLVMContext - The associated LLVMContext
static ABIArgInfo coerceToIntArray(QualType Ty,
                                   ASTContext &Context,
                                   llvm::LLVMContext &LLVMContext) {
  // Alignment and Size are measured in bits.
  const uint64_t Size = Context.getTypeSize(Ty);
  const uint64_t Alignment = Context.getTypeAlign(Ty);
  llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Alignment);
  const uint64_t NumElements = (Size + Alignment - 1) / Alignment;
  return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements));
}

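// Example (illustrative): for a type such as
//   struct S { int32_t a; int8_t b; };   // 64 bits with tail padding,
//                                        // 32-bit alignment
// Size = 64 and Alignment = 32, so NumElements = 2 and the struct is
// coerced to the IR type [2 x i32], preserving the padded size exactly.
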
static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
                               llvm::Value *Array,
                               llvm::Value *Value,
                               unsigned FirstIndex,
                               unsigned LastIndex) {
  // Alternatively, we could emit this as a loop in the source.
  for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
    llvm::Value *Cell =
        Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I);
    Builder.CreateAlignedStore(Value, Cell, CharUnits::One());
  }
}

static bool isAggregateTypeForABI(QualType T) {
  return !CodeGenFunction::hasScalarEvaluationKind(T) ||
         T->isMemberFunctionPointerType();
}

ABIArgInfo
ABIInfo::getNaturalAlignIndirect(QualType Ty, bool ByRef, bool Realign,
                                 llvm::Type *Padding) const {
  return ABIArgInfo::getIndirect(getContext().getTypeAlignInChars(Ty),
                                 ByRef, Realign, Padding);
}

ABIArgInfo
ABIInfo::getNaturalAlignIndirectInReg(QualType Ty, bool Realign) const {
  return ABIArgInfo::getIndirectInReg(getContext().getTypeAlignInChars(Ty),
                                      /*ByRef*/ false, Realign);
}

Address ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                             QualType Ty) const {
  return Address::invalid();
}

ABIInfo::~ABIInfo() {}

/// Does the given lowering require more than the given number of
/// registers when expanded?
///
/// This is intended to be the basis of a reasonable basic implementation
/// of should{Pass,Return}IndirectlyForSwift.
///
/// For most targets, a limit of four total registers is reasonable; this
/// limits the amount of code required in order to move around the value
/// in case it wasn't produced immediately prior to the call by the caller
/// (or wasn't produced in exactly the right registers) or isn't used
/// immediately within the callee. But some targets may need to further
/// limit the register count due to an inability to support that many
/// return registers.
static bool occupiesMoreThan(CodeGenTypes &cgt,
                             ArrayRef<llvm::Type*> scalarTypes,
                             unsigned maxAllRegisters) {
  unsigned intCount = 0, fpCount = 0;
  for (llvm::Type *type : scalarTypes) {
    if (type->isPointerTy()) {
      intCount++;
    } else if (auto intTy = dyn_cast<llvm::IntegerType>(type)) {
      auto ptrWidth = cgt.getTarget().getPointerWidth(0);
      intCount += (intTy->getBitWidth() + ptrWidth - 1) / ptrWidth;
    } else {
      assert(type->isVectorTy() || type->isFloatingPointTy());
      fpCount++;
    }
  }

  return (intCount + fpCount > maxAllRegisters);
}

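// Example (illustrative): on a target with 64-bit pointers, the scalar
// sequence { i8*, i64, i32, float } counts 3 integer registers (one each
// for the pointer, the i64, and the i32 rounded up to pointer width) plus
// 1 fp register, so occupiesMoreThan(..., /*maxAllRegisters=*/3) is true
// while occupiesMoreThan(..., /*maxAllRegisters=*/4) is false.
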
bool SwiftABIInfo::isLegalVectorTypeForSwift(CharUnits vectorSize,
                                             llvm::Type *eltTy,
                                             unsigned numElts) const {
  // The default implementation of this assumes that the target guarantees
  // 128-bit SIMD support but nothing more.
  return (vectorSize.getQuantity() > 8 && vectorSize.getQuantity() <= 16);
}

static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT,
                                              CGCXXABI &CXXABI) {
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!RD)
    return CGCXXABI::RAA_Default;
  return CXXABI.getRecordArgABI(RD);
}

static CGCXXABI::RecordArgABI getRecordArgABI(QualType T,
                                              CGCXXABI &CXXABI) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return CGCXXABI::RAA_Default;
  return getRecordArgABI(RT, CXXABI);
}

/// Pass transparent unions as if they were the type of the first element. Sema
/// should ensure that all elements of the union have the same "machine type".
static QualType useFirstFieldIfTransparentUnion(QualType Ty) {
  if (const RecordType *UT = Ty->getAsUnionType()) {
    const RecordDecl *UD = UT->getDecl();
    if (UD->hasAttr<TransparentUnionAttr>()) {
      assert(!UD->field_empty() && "sema created an empty transparent union");
      return UD->field_begin()->getType();
    }
  }
  return Ty;
}

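// For reference, a C declaration that takes this path looks like:
//   typedef union __attribute__((__transparent_union__)) {
//     int *ip;
//     float *fp;
//   } arg_t;
// An argument of type arg_t is classified exactly as if it were declared
// with the type of its first field, 'int *'.
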
CGCXXABI &ABIInfo::getCXXABI() const {
  return CGT.getCXXABI();
}

ASTContext &ABIInfo::getContext() const {
  return CGT.getContext();
}

llvm::LLVMContext &ABIInfo::getVMContext() const {
  return CGT.getLLVMContext();
}

const llvm::DataLayout &ABIInfo::getDataLayout() const {
  return CGT.getDataLayout();
}

const TargetInfo &ABIInfo::getTarget() const {
  return CGT.getTarget();
}

bool ABIInfo::isAndroid() const { return getTarget().getTriple().isAndroid(); }

bool ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  return false;
}

bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                uint64_t Members) const {
  return false;
}

bool ABIInfo::shouldSignExtUnsignedType(QualType Ty) const {
  return false;
}

LLVM_DUMP_METHOD void ABIArgInfo::dump() const {
  raw_ostream &OS = llvm::errs();
  OS << "(ABIArgInfo Kind=";
  switch (TheKind) {
  case Direct:
    OS << "Direct Type=";
    if (llvm::Type *Ty = getCoerceToType())
      Ty->print(OS);
    else
      OS << "null";
    break;
  case Extend:
    OS << "Extend";
    break;
  case Ignore:
    OS << "Ignore";
    break;
  case InAlloca:
    OS << "InAlloca Offset=" << getInAllocaFieldIndex();
    break;
  case Indirect:
    OS << "Indirect Align=" << getIndirectAlign().getQuantity()
       << " ByVal=" << getIndirectByVal()
       << " Realign=" << getIndirectRealign();
    break;
  case Expand:
    OS << "Expand";
    break;
  case CoerceAndExpand:
    OS << "CoerceAndExpand Type=";
    getCoerceAndExpandType()->print(OS);
    break;
  }
  OS << ")\n";
}

// Dynamically round a pointer up to a multiple of the given alignment.
static llvm::Value *emitRoundPointerUpToAlignment(CodeGenFunction &CGF,
                                                  llvm::Value *Ptr,
                                                  CharUnits Align) {
  llvm::Value *PtrAsInt = Ptr;
  // OverflowArgArea = (OverflowArgArea + Align - 1) & -Align;
  PtrAsInt = CGF.Builder.CreatePtrToInt(PtrAsInt, CGF.IntPtrTy);
  PtrAsInt = CGF.Builder.CreateAdd(PtrAsInt,
        llvm::ConstantInt::get(CGF.IntPtrTy, Align.getQuantity() - 1));
  PtrAsInt = CGF.Builder.CreateAnd(PtrAsInt,
        llvm::ConstantInt::get(CGF.IntPtrTy, -Align.getQuantity()));
  PtrAsInt = CGF.Builder.CreateIntToPtr(PtrAsInt,
                                        Ptr->getType(),
                                        Ptr->getName() + ".aligned");
  return PtrAsInt;
}

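// Worked example of the rounding above (illustrative): with Align == 8 and
// an incoming pointer equal to 0x1003,
//   (0x1003 + 7) & -8 == 0x100a & ~7 == 0x1008,
// i.e. the pointer is rounded up to the next 8-byte boundary.
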
/// Emit va_arg for a platform using the common void* representation,
/// where arguments are simply emitted in an array of slots on the stack.
///
/// This version implements the core direct-value passing rules.
///
/// \param SlotSize - The size and alignment of a stack slot.
///   Each argument will be allocated to a multiple of this number of
///   slots, and all the slots will be aligned to this value.
/// \param AllowHigherAlign - The slot alignment is not a cap;
///   an argument type with an alignment greater than the slot size
///   will be emitted on a higher-alignment address, potentially
///   leaving one or more empty slots behind as padding. If this
///   is false, the returned address might be less-aligned than
///   DirectAlign.
static Address emitVoidPtrDirectVAArg(CodeGenFunction &CGF,
                                      Address VAListAddr,
                                      llvm::Type *DirectTy,
                                      CharUnits DirectSize,
                                      CharUnits DirectAlign,
                                      CharUnits SlotSize,
                                      bool AllowHigherAlign) {
  // Cast the element type to i8* if necessary. Some platforms define
  // va_list as a struct containing an i8* instead of just an i8*.
  if (VAListAddr.getElementType() != CGF.Int8PtrTy)
    VAListAddr = CGF.Builder.CreateElementBitCast(VAListAddr, CGF.Int8PtrTy);

  llvm::Value *Ptr = CGF.Builder.CreateLoad(VAListAddr, "argp.cur");

  // If the CC aligns values higher than the slot size, do so if needed.
  Address Addr = Address::invalid();
  if (AllowHigherAlign && DirectAlign > SlotSize) {
    Addr = Address(emitRoundPointerUpToAlignment(CGF, Ptr, DirectAlign),
                   DirectAlign);
  } else {
    Addr = Address(Ptr, SlotSize);
  }

  // Advance the pointer past the argument, then store that back.
  CharUnits FullDirectSize = DirectSize.alignTo(SlotSize);
  llvm::Value *NextPtr =
      CGF.Builder.CreateConstInBoundsByteGEP(Addr.getPointer(), FullDirectSize,
                                             "argp.next");
  CGF.Builder.CreateStore(NextPtr, VAListAddr);

  // If the argument is smaller than a slot, and this is a big-endian
  // target, the argument will be right-adjusted in its slot.
  if (DirectSize < SlotSize && CGF.CGM.getDataLayout().isBigEndian() &&
      !DirectTy->isStructTy()) {
    Addr = CGF.Builder.CreateConstInBoundsByteGEP(Addr, SlotSize - DirectSize);
  }

  Addr = CGF.Builder.CreateElementBitCast(Addr, DirectTy);
  return Addr;
}

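// Illustrative walk-through: with 4-byte slots, a 4-byte int occupies one
// slot and the va_list pointer simply advances by 4. An 8-byte-aligned
// double with AllowHigherAlign first rounds the current pointer up to a
// multiple of 8 (possibly skipping a padding slot), then advances by 8.
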
/// Emit va_arg for a platform using the common void* representation,
/// where arguments are simply emitted in an array of slots on the stack.
///
/// \param IsIndirect - Values of this type are passed indirectly.
/// \param ValueInfo - The size and alignment of this type, generally
///   computed with getContext().getTypeInfoInChars(ValueTy).
/// \param SlotSizeAndAlign - The size and alignment of a stack slot.
///   Each argument will be allocated to a multiple of this number of
///   slots, and all the slots will be aligned to this value.
/// \param AllowHigherAlign - The slot alignment is not a cap;
///   an argument type with an alignment greater than the slot size
///   will be emitted on a higher-alignment address, potentially
///   leaving one or more empty slots behind as padding.
static Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType ValueTy, bool IsIndirect,
                                std::pair<CharUnits, CharUnits> ValueInfo,
                                CharUnits SlotSizeAndAlign,
                                bool AllowHigherAlign) {
  // The size and alignment of the value that was passed directly.
  CharUnits DirectSize, DirectAlign;
  if (IsIndirect) {
    DirectSize = CGF.getPointerSize();
    DirectAlign = CGF.getPointerAlign();
  } else {
    DirectSize = ValueInfo.first;
    DirectAlign = ValueInfo.second;
  }

  // Cast the address we've calculated to the right type.
  llvm::Type *DirectTy = CGF.ConvertTypeForMem(ValueTy);
  if (IsIndirect)
    DirectTy = DirectTy->getPointerTo(0);

  Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, DirectTy,
                                        DirectSize, DirectAlign,
                                        SlotSizeAndAlign,
                                        AllowHigherAlign);

  if (IsIndirect) {
    Addr = Address(CGF.Builder.CreateLoad(Addr), ValueInfo.second);
  }

  return Addr;
}

static Address emitMergePHI(CodeGenFunction &CGF,
                            Address Addr1, llvm::BasicBlock *Block1,
                            Address Addr2, llvm::BasicBlock *Block2,
                            const llvm::Twine &Name = "") {
  assert(Addr1.getType() == Addr2.getType());
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(Addr1.getType(), 2, Name);
  PHI->addIncoming(Addr1.getPointer(), Block1);
  PHI->addIncoming(Addr2.getPointer(), Block2);
  CharUnits Align = std::min(Addr1.getAlignment(), Addr2.getAlignment());
  return Address(PHI, Align);
}

TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }

// If someone can figure out a general rule for this, that would be great.
// It's probably just doomed to be platform-dependent, though.
unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
  // Verified for:
  //   x86-64     FreeBSD, Linux, Darwin
  //   x86-32     FreeBSD, Linux, Darwin
  //   PowerPC    Linux, Darwin
  //   ARM        Darwin (*not* EABI)
  return 32;
}

bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args,
                                const FunctionNoProtoType *fnType) const {
  // The following conventions are known to require this to be false:
  //   x86_stdcall
  //   x86_fastcall
  // For everything else, we just prefer false unless we opt out.
  return false;
}

void
TargetCodeGenInfo::getDependentLibraryOption(llvm::StringRef Lib,
                                             llvm::SmallString<24> &Opt) const {
  // This assumes the user is passing a library name like "rt" instead of a
  // filename like "librt.a/so", and that they don't care whether it's static or
  // dynamic.
  Opt = "-l";
  Opt += Lib;
}

unsigned TargetCodeGenInfo::getOpenCLKernelCallingConv() const {
  return llvm::CallingConv::C;
}

llvm::Constant *TargetCodeGenInfo::getNullPointer(const CodeGen::CodeGenModule &CGM,
    llvm::PointerType *T, QualType QT) const {
  return llvm::ConstantPointerNull::get(T);
}

llvm::Value *TargetCodeGenInfo::performAddrSpaceCast(
    CodeGen::CodeGenFunction &CGF, llvm::Value *Src, QualType SrcTy,
    QualType DestTy) const {
  // Since the target may map different address spaces in AST to the same
  // address space, an address space conversion may end up as a bitcast.
  return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Src,
                                                         CGF.ConvertType(DestTy));
}

static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);

/// isEmptyField - Return true iff the field is "empty", that is it
/// is an unnamed bit-field or an (array of) empty record(s).
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
                         bool AllowArrays) {
  if (FD->isUnnamedBitfield())
    return true;

  QualType FT = FD->getType();

  // Constant arrays of empty records count as empty, strip them off.
  // Constant arrays of zero length always count as empty.
  if (AllowArrays)
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize() == 0)
        return true;
      FT = AT->getElementType();
    }

  const RecordType *RT = FT->getAs<RecordType>();
  if (!RT)
    return false;

  // C++ record fields are never empty, at least in the Itanium ABI.
  //
  // FIXME: We should use a predicate for whether this behavior is true in the
  // current ABI.
  if (isa<CXXRecordDecl>(RT->getDecl()))
    return false;

  return isEmptyRecord(Context, FT, AllowArrays);
}

/// isEmptyRecord - Return true iff a structure contains only empty
/// fields. Note that a structure with a flexible array member is not
/// considered empty.
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())
      if (!isEmptyRecord(Context, I.getType(), true))
        return false;

  for (const auto *I : RD->fields())
    if (!isEmptyField(Context, I, AllowArrays))
      return false;
  return true;
}

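// Example (illustrative): with AllowArrays set, both of these C structs
// count as empty:
//   struct E1 {};                            // no fields at all
//   struct E2 { int : 0; struct E1 e[4]; };  // unnamed bit-field plus an
//                                            // array of empty records
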
/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The field declaration for the single non-empty field, if
/// it exists.
static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return nullptr;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return nullptr;

  const Type *Found = nullptr;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (const auto &I : CXXRD->bases()) {
      // Ignore empty records.
      if (isEmptyRecord(Context, I.getType(), true))
        continue;

      // If we already found an element then this isn't a single-element struct.
      if (Found)
        return nullptr;

      // If this is non-empty and not a single element struct, the composite
      // cannot be a single element struct.
      Found = isSingleElementStruct(I.getType(), Context);
      if (!Found)
        return nullptr;
    }
  }

  // Check for single element.
  for (const auto *FD : RD->fields()) {
    QualType FT = FD->getType();

    // Ignore empty fields.
    if (isEmptyField(Context, FD, true))
      continue;

    // If we already found an element then this isn't a single-element
    // struct.
    if (Found)
      return nullptr;

    // Treat single element arrays as the element.
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize().getZExtValue() != 1)
        break;
      FT = AT->getElementType();
    }

    if (!isAggregateTypeForABI(FT)) {
      Found = FT.getTypePtr();
    } else {
      Found = isSingleElementStruct(FT, Context);
      if (!Found)
        return nullptr;
    }
  }

  // We don't consider a struct a single-element struct if it has
  // padding beyond the element type.
  if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
    return nullptr;

  return Found;
}

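// Example (illustrative): this qualifies as a single-element struct whose
// element type is 'double':
//   struct Inner { double d; };
//   struct Outer { struct Inner i[1]; };   // one-element array, one field
// whereas 'struct { double d; int x; }' does not (two non-empty fields),
// and neither does any struct with padding beyond its element type.
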
Address EmitVAArgInstr(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                       const ABIArgInfo &AI) {
  // This default implementation defers to the llvm backend's va_arg
  // instruction. It can handle only passing arguments directly
  // (typically only handled in the backend for primitive types), or
  // aggregates passed indirectly by pointer (NOTE: if the "byval"
  // flag has ABI impact in the callee, this implementation cannot
  // work.)
  //
  // Only a few cases are covered here at the moment -- those needed
  // by the default abi.
  llvm::Value *Val;

  if (AI.isIndirect()) {
    assert(!AI.getPaddingType() &&
           "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
    assert(
        !AI.getIndirectRealign() &&
        "Unexpected IndirectRealign seen in arginfo in generic VAArg emitter!");

    auto TyInfo = CGF.getContext().getTypeInfoInChars(Ty);
    CharUnits TyAlignForABI = TyInfo.second;

    llvm::Type *BaseTy =
        llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
    llvm::Value *Addr =
        CGF.Builder.CreateVAArg(VAListAddr.getPointer(), BaseTy);
    return Address(Addr, TyAlignForABI);
  } else {
    assert((AI.isDirect() || AI.isExtend()) &&
           "Unexpected ArgInfo Kind in generic VAArg emitter!");

    assert(!AI.getInReg() &&
           "Unexpected InReg seen in arginfo in generic VAArg emitter!");
    assert(!AI.getPaddingType() &&
           "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
    assert(!AI.getDirectOffset() &&
           "Unexpected DirectOffset seen in arginfo in generic VAArg emitter!");
    assert(!AI.getCoerceToType() &&
           "Unexpected CoerceToType seen in arginfo in generic VAArg emitter!");

    Address Temp = CGF.CreateMemTemp(Ty, "varet");
    Val = CGF.Builder.CreateVAArg(VAListAddr.getPointer(), CGF.ConvertType(Ty));
    CGF.Builder.CreateStore(Val, Temp);
    return Temp;
  }
}

/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
class DefaultABIInfo : public ABIInfo {
public:
  DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type);
  }

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override {
    return EmitVAArgInstr(CGF, VAListAddr, Ty, classifyArgumentType(Ty));
  }
};

class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
};

ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // passed by value.
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

    return getNaturalAlignIndirect(Ty);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy))
    return getNaturalAlignIndirect(RetTy);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

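// Example of the resulting default classifications (illustrative):
//   void f();       // return: Ignore
//   short g();      // return: Extend (promotable integer)
//   int h();        // return: Direct
//   struct S k();   // return: Indirect (returned via sret pointer)
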
//===----------------------------------------------------------------------===//
// WebAssembly ABI Implementation
//
// This is a very simple ABI that relies a lot on DefaultABIInfo.
//===----------------------------------------------------------------------===//

class WebAssemblyABIInfo final : public DefaultABIInfo {
public:
  explicit WebAssemblyABIInfo(CodeGen::CodeGenTypes &CGT)
      : DefaultABIInfo(CGT) {}

private:
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  // DefaultABIInfo's classifyReturnType and classifyArgumentType are
  // non-virtual, but computeInfo and EmitVAArg are virtual, so we
  // overload them.
  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &Arg : FI.arguments())
      Arg.info = classifyArgumentType(Arg.type);
  }

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
};

class WebAssemblyTargetCodeGenInfo final : public TargetCodeGenInfo {
public:
  explicit WebAssemblyTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(new WebAssemblyABIInfo(CGT)) {}
};

/// \brief Classify argument of given type \p Ty.
ABIArgInfo WebAssemblyABIInfo::classifyArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // passed by value.
    if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();
    // Lower single-element structs to just pass a regular value. TODO: We
    // could do reasonable-size multiple-element structs too, using getExpand(),
    // though watch out for things like bitfields.
    if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
      return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
  }

  // Otherwise just do the default thing.
  return DefaultABIInfo::classifyArgumentType(Ty);
}

ABIArgInfo WebAssemblyABIInfo::classifyReturnType(QualType RetTy) const {
  if (isAggregateTypeForABI(RetTy)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // returned by value.
    if (!getRecordArgABI(RetTy, getCXXABI())) {
      // Ignore empty structs/unions.
      if (isEmptyRecord(getContext(), RetTy, true))
        return ABIArgInfo::getIgnore();
      // Lower single-element structs to just return a regular value. TODO: We
      // could do reasonable-size multiple-element structs too, using
      // ABIArgInfo::getDirect().
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
    }
  }

  // Otherwise just do the default thing.
  return DefaultABIInfo::classifyReturnType(RetTy);
}

Address WebAssemblyABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                      QualType Ty) const {
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect=*/ false,
                          getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(4),
                          /*AllowHigherAlign=*/ true);
}

//===----------------------------------------------------------------------===//
// le32/PNaCl bitcode ABI Implementation
//
// This is a simplified version of the x86_32 ABI. Arguments and return values
// are always passed on the stack.
//===----------------------------------------------------------------------===//

class PNaClABIInfo : public ABIInfo {
public:
  PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  void computeInfo(CGFunctionInfo &FI) const override;
  Address EmitVAArg(CodeGenFunction &CGF,
                    Address VAListAddr, QualType Ty) const override;
};

class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(new PNaClABIInfo(CGT)) {}
};

void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type);
}

Address PNaClABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty) const {
  // The PNaCL ABI is a bit odd, in that varargs don't use normal
  // function classification. Structs get passed directly for varargs
  // functions, through a rewriting transform in
  // pnacl-llvm/lib/Transforms/NaCl/ExpandVarArgs.cpp, which allows
  // this target to actually support va_arg instructions with an
  // aggregate type, unlike other targets.
  return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());
}

/// \brief Classify argument of given type \p Ty.
ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty)) {
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
    return getNaturalAlignIndirect(Ty);
  } else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
    // Treat an enum type as its underlying type.
    Ty = EnumTy->getDecl()->getIntegerType();
  } else if (Ty->isFloatingType()) {
    // Floating-point types don't go inreg.
    return ABIArgInfo::getDirect();
  }

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // In the PNaCl ABI we always return records/structures on the stack.
  if (isAggregateTypeForABI(RetTy))
    return getNaturalAlignIndirect(RetTy);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

/// IsX86_MMXType - Return true if this is an MMX type.
bool IsX86_MMXType(llvm::Type *IRType) {
  // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>.
  return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
    cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
    IRType->getScalarSizeInBits() != 64;
}

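// Illustrative cases: <2 x i32>, <4 x i16>, and <8 x i8> satisfy this
// predicate; <1 x i64> does not (64-bit scalar element), and <2 x float>
// does not (non-integer elements).
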
static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                          StringRef Constraint,
                                          llvm::Type* Ty) {
  if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy()) {
    if (cast<llvm::VectorType>(Ty)->getBitWidth() != 64) {
      // Invalid MMX constraint
      return nullptr;
    }

    return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
  }

  // No operation needed
  return Ty;
}

/// Returns true if this type can be passed in SSE registers with the
/// X86_VectorCall calling convention. Shared between x86_32 and x86_64.
static bool isX86VectorTypeForVectorCall(ASTContext &Context, QualType Ty) {
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half)
      return true;
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // vectorcall can pass XMM, YMM, and ZMM vectors. We don't pass SSE1 MMX
    // registers specially.
    unsigned VecSize = Context.getTypeSize(VT);
    if (VecSize == 128 || VecSize == 256 || VecSize == 512)
      return true;
  }
  return false;
}

/// Returns true if this aggregate is small enough to be passed in SSE registers
/// in the X86_VectorCall calling convention. Shared between x86_32 and x86_64.
static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
  return NumMembers <= 4;
}

/// Returns a Homogeneous Vector Aggregate ABIArgInfo, used in X86.
static ABIArgInfo getDirectX86Hva(llvm::Type* T = nullptr) {
  auto AI = ABIArgInfo::getDirect(T);
  AI.setInReg(true);
  AI.setCanBeFlattened(false);
  return AI;
}

//===----------------------------------------------------------------------===//
// X86-32 ABI Implementation
//===----------------------------------------------------------------------===//

/// \brief Similar to llvm::CCState, but for Clang.
struct CCState {
  CCState(unsigned CC) : CC(CC), FreeRegs(0), FreeSSERegs(0) {}

  unsigned CC;
  unsigned FreeRegs;
  unsigned FreeSSERegs;
};

enum {
  // Vectorcall only allows the first 6 parameters to be passed in registers.
  VectorcallMaxParamNumAsReg = 6
};

/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public SwiftABIInfo {
  enum Class {
    Integer,
    Float
  };

  static const unsigned MinABIStackAlignInBytes = 4;

  bool IsDarwinVectorABI;
  bool IsRetSmallStructInRegABI;
  bool IsWin32StructABI;
  bool IsSoftFloatABI;
  bool IsMCUABI;
  unsigned DefaultNumRegisterParameters;

  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }

  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorTypeForVectorCall(getContext(), Ty);
  }

  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t NumMembers) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorCallAggregateSmallEnough(NumMembers);
  }

  bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context) const;

  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;

  ABIArgInfo getIndirectReturnResult(QualType Ty, CCState &State) const;

  /// \brief Return the alignment to use for the given type on the stack.
  unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;

  Class classify(QualType Ty) const;
  ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const;
  ABIArgInfo reclassifyHvaArgType(QualType RetTy, CCState &State,
                                  const ABIArgInfo &current) const;
  /// \brief Updates the number of available free registers, returns
  /// true if any registers were allocated.
  bool updateFreeRegs(QualType Ty, CCState &State) const;

  bool shouldAggregateUseDirect(QualType Ty, CCState &State, bool &InReg,
                                bool &NeedsPadding) const;
  bool shouldPrimitiveUseInReg(QualType Ty, CCState &State) const;

  bool canExpandIndirectArgument(QualType Ty) const;

  /// \brief Rewrite the function info so that all memory arguments use
  /// inalloca.
  void rewriteWithInAlloca(CGFunctionInfo &FI) const;

  void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
                           CharUnits &StackOffset, ABIArgInfo &Info,
                           QualType Type) const;
  void computeVectorCallArgs(CGFunctionInfo &FI, CCState &State,
                             bool &UsedInAlloca) const;

public:

  void computeInfo(CGFunctionInfo &FI) const override;
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
                bool RetSmallStructInRegABI, bool Win32StructABI,
                unsigned NumRegisterParameters, bool SoftFloatABI)
    : SwiftABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
      IsRetSmallStructInRegABI(RetSmallStructInRegABI),
      IsWin32StructABI(Win32StructABI),
      IsSoftFloatABI(SoftFloatABI),
      IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()),
      DefaultNumRegisterParameters(NumRegisterParameters) {}

  bool shouldPassIndirectlyForSwift(CharUnits totalSize,
                                    ArrayRef<llvm::Type*> scalars,
                                    bool asReturnValue) const override {
    // LLVM's x86-32 lowering currently only assigns up to three
    // integer registers and three fp registers. Oddly, it'll use up to
    // four vector registers for vectors, but those can overlap with the
    // integer registers by the end.
    return occupiesMoreThan(CGT, scalars, /*total*/ 3);
  }

  bool isSwiftErrorInRegister() const override {
    // x86-32 lowering does not support passing swifterror in a register.
    return false;
  }
};

class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
                          bool RetSmallStructInRegABI, bool Win32StructABI,
                          unsigned NumRegisterParameters, bool SoftFloatABI)
      : TargetCodeGenInfo(new X86_32ABIInfo(
            CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
            NumRegisterParameters, SoftFloatABI)) {}

  static bool isStructReturnInRegABI(
      const llvm::Triple &Triple, const CodeGenOptions &Opts);

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    // Darwin uses different dwarf register numbers for EH.
    if (CGM.getTarget().getTriple().isOSDarwin()) return 5;
    return 4;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  void addReturnRegisterOutputs(CodeGenFunction &CGF, LValue ReturnValue,
                                std::string &Constraints,
                                std::vector<llvm::Type *> &ResultRegTypes,
                                std::vector<llvm::Type *> &ResultTruncRegTypes,
                                std::vector<LValue> &ResultRegDests,
                                std::string &AsmString,
                                unsigned NumOutputs) const override;

  llvm::Constant *
  getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
    unsigned Sig = (0xeb << 0) |  // jmp rel8
                   (0x06 << 8) |  //           .+0x08
                   ('F' << 16) |
                   ('T' << 24);
    return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
  }

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "movl\t%ebp, %ebp"
           "\t\t## marker for objc_retainAutoreleaseReturnValue";
  }
};

/// Rewrite input constraint references after adding some output constraints.
/// In the case where there is one output and one input and we add one output,
/// we need to replace all operand references greater than or equal to 1:
///     mov $0, $1
///     mov eax, $1
/// The result will be:
///     mov $0, $2
///     mov eax, $2
static void rewriteInputConstraintReferences(unsigned FirstIn,
                                             unsigned NumNewOuts,
                                             std::string &AsmString) {
  std::string Buf;
  llvm::raw_string_ostream OS(Buf);
  size_t Pos = 0;
  while (Pos < AsmString.size()) {
    size_t DollarStart = AsmString.find('$', Pos);
    if (DollarStart == std::string::npos)
      DollarStart = AsmString.size();
    size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart);
    if (DollarEnd == std::string::npos)
      DollarEnd = AsmString.size();
    OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
    Pos = DollarEnd;
    size_t NumDollars = DollarEnd - DollarStart;
    if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
      // We have an operand reference.
      size_t DigitStart = Pos;
      size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart);
      if (DigitEnd == std::string::npos)
        DigitEnd = AsmString.size();
      StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
      unsigned OperandIndex;
      if (!OperandStr.getAsInteger(10, OperandIndex)) {
        if (OperandIndex >= FirstIn)
          OperandIndex += NumNewOuts;
        OS << OperandIndex;
      } else {
        OS << OperandStr;
      }
      Pos = DigitEnd;
    }
  }
  AsmString = std::move(OS.str());
}

/// Add output constraints for EAX:EDX because they are return registers.
void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
    CodeGenFunction &CGF, LValue ReturnSlot, std::string &Constraints,
    std::vector<llvm::Type *> &ResultRegTypes,
    std::vector<llvm::Type *> &ResultTruncRegTypes,
    std::vector<LValue> &ResultRegDests, std::string &AsmString,
    unsigned NumOutputs) const {
  uint64_t RetWidth = CGF.getContext().getTypeSize(ReturnSlot.getType());

  // Use the EAX constraint if the width is 32 or smaller and EAX:EDX if it is
  // larger.
  if (!Constraints.empty())
    Constraints += ',';
  if (RetWidth <= 32) {
    Constraints += "={eax}";
    ResultRegTypes.push_back(CGF.Int32Ty);
  } else {
    // Use the 'A' constraint for EAX:EDX.
    Constraints += "=A";
    ResultRegTypes.push_back(CGF.Int64Ty);
  }

  // Truncate EAX or EAX:EDX to an integer of the appropriate size.
  llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth);
  ResultTruncRegTypes.push_back(CoerceTy);

  // Coerce the integer by bitcasting the return slot pointer.
  ReturnSlot.setAddress(CGF.Builder.CreateBitCast(ReturnSlot.getAddress(),
                                                  CoerceTy->getPointerTo()));
  ResultRegDests.push_back(ReturnSlot);

  rewriteInputConstraintReferences(NumOutputs, 1, AsmString);
}

/// shouldReturnTypeInRegister - Determine if the given type should be
/// returned in a register (for the Darwin and MCU ABI).
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
                                               ASTContext &Context) const {
  uint64_t Size = Context.getTypeSize(Ty);

  // For i386, the type must be register sized.
  // For the MCU ABI, it only needs to be <= 8 bytes.
  if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))
    return false;

  if (Ty->isVectorType()) {
    // 64- and 128- bit vectors inside structures are not returned in
    // registers.
    if (Size == 64 || Size == 128)
      return false;

    return true;
  }

  // If this is a builtin, pointer, enum, complex type, member pointer, or
  // member function pointer it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
      Ty->isAnyComplexType() || Ty->isEnumeralType() ||
      Ty->isBlockPointerType() || Ty->isMemberPointerType())
    return true;

  // Arrays are treated like records.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
    return shouldReturnTypeInRegister(AT->getElementType(), Context);

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // FIXME: Traverse bases here too.

  // Structure types are passed in register if all fields would be
  // passed in a register.
  for (const auto *FD : RT->getDecl()->fields()) {
    // Empty fields are ignored.
    if (isEmptyField(Context, FD, true))
      continue;

    // Check fields recursively.
    if (!shouldReturnTypeInRegister(FD->getType(), Context))
      return false;
  }
  return true;
}

static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
  // Treat complex types as the element type.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
    Ty = CTy->getElementType();

  // Check for a type which we know has a simple scalar argument-passing
  // convention without any padding. (We're specifically looking for 32
  // and 64-bit integer and integer-equivalents, float, and double.)
  if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
      !Ty->isEnumeralType() && !Ty->isBlockPointerType())
    return false;

  uint64_t Size = Context.getTypeSize(Ty);
  return Size == 32 || Size == 64;
}

/// Test whether an argument type which is to be passed indirectly (on the
/// stack) would have the equivalent layout if it was expanded into separate
/// arguments. If so, we prefer to do the latter to avoid inhibiting
/// optimizations.
bool X86_32ABIInfo::canExpandIndirectArgument(QualType Ty) const {
  // We can only expand structure types.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    if (!IsWin32StructABI) {
      // On non-Windows, we have to conservatively match our old bitcode
      // prototypes in order to be ABI-compatible at the bitcode level.
      if (!CXXRD->isCLike())
        return false;
    } else {
      // Don't do this for dynamic classes.
      if (CXXRD->isDynamicClass())
        return false;
      // Don't do this if there are any non-empty bases.
      for (const CXXBaseSpecifier &Base : CXXRD->bases()) {
        if (!isEmptyRecord(getContext(), Base.getType(), /*AllowArrays=*/true))
          return false;
      }
    }
  }

  uint64_t Size = 0;

  for (const auto *FD : RD->fields()) {
    // Scalar arguments on the stack get 4 byte alignment on x86. If the
    // argument is smaller than 32-bits, expanding the struct will create
    // alignment padding.
    if (!is32Or64BitBasicType(FD->getType(), getContext()))
      return false;

    // FIXME: Reject bit-fields wholesale; there are two problems, we don't know
    // how to expand them yet, and the predicate for telling if a bitfield still
    // counts as "basic" is more complicated than what we were doing previously.
    if (FD->isBitField())
      return false;

    Size += getContext().getTypeSize(FD->getType());
  }

  // We can do this if there was no alignment padding.
  return Size == getContext().getTypeSize(Ty);
}

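// Example (illustrative): 'struct { int a; float b; }' can be expanded --
// two 32-bit basic fields and no padding -- while 'struct { char c; int a; }'
// cannot, since 'char' is not a 32/64-bit basic type and the struct
// contains alignment padding.
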
ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(QualType RetTy,
                                                  CCState &State) const {
  // If the return value is indirect, then the hidden argument is consuming one
  // integer register.
  if (State.FreeRegs) {
    --State.FreeRegs;
    if (!IsMCUABI)
      return getNaturalAlignIndirectInReg(RetTy);
  }
  return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
}

ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
                                             CCState &State) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  if ((State.CC == llvm::CallingConv::X86_VectorCall ||
       State.CC == llvm::CallingConv::X86_RegCall) &&
      isHomogeneousAggregate(RetTy, Base, NumElts)) {
    // The LLVM struct type for such an aggregate should lower properly.
    return ABIArgInfo::getDirect();
  }

  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // On Darwin, some vectors are returned in registers.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // 128-bit vectors are a special case; they are returned in
      // registers and we need to make sure to pick a type the LLVM
      // backend will like.
      if (Size == 128)
        return ABIArgInfo::getDirect(llvm::VectorType::get(
                  llvm::Type::getInt64Ty(getVMContext()), 2));

      // Always return in register if it fits in a general purpose
      // register, or if it is 64 bits and has a single element.
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));

      return getIndirectReturnResult(RetTy, State);
    }

    return ABIArgInfo::getDirect();
  }

  if (isAggregateTypeForABI(RetTy)) {
    if (const RecordType *RT = RetTy->getAs<RecordType>()) {
      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectReturnResult(RetTy, State);
    }

    // If specified, structs and unions are always indirect.
    if (!IsRetSmallStructInRegABI && !RetTy->isAnyComplexType())
      return getIndirectReturnResult(RetTy, State);

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), RetTy, true))
      return ABIArgInfo::getIgnore();

    // Small structures which are register sized are generally returned
    // in a register.
    if (shouldReturnTypeInRegister(RetTy, getContext())) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // As a special-case, if the struct is a "single-element" struct, and
      // the field is of type "float" or "double", return it in a
      // floating-point register. (MSVC does not apply this special case.)
      // We apply a similar transformation for pointer types to improve the
      // quality of the generated IR.
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
            || SeltTy->hasPointerRepresentation())
          return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

      // FIXME: We should be able to narrow this integer in cases with dead
      // padding.
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size));
    }

    return getIndirectReturnResult(RetTy, State);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

static bool isSSEVectorType(ASTContext &Context, QualType Ty) {
  return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
}

static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())
      if (!isRecordWithSSEVectorType(Context, I.getType()))
        return false;

  for (const auto *i : RD->fields()) {
    QualType FT = i->getType();

    if (isSSEVectorType(Context, FT))
      return true;

    if (isRecordWithSSEVectorType(Context, FT))
      return true;
  }

  return false;
}

unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
                                                 unsigned Align) const {
  // Otherwise, if the alignment is less than or equal to the minimum ABI
  // alignment, just use the default; the backend will handle this.
  if (Align <= MinABIStackAlignInBytes)
    return 0; // Use default alignment.

  // On non-Darwin, the stack type alignment is always 4.
  if (!IsDarwinVectorABI) {
    // Set explicit alignment, since we may need to realign the top.
    return MinABIStackAlignInBytes;
  }

  // Otherwise, if the type contains an SSE vector type, the alignment is 16.
  if (Align >= 16 && (isSSEVectorType(getContext(), Ty) ||
                      isRecordWithSSEVectorType(getContext(), Ty)))
    return 16;

  return MinABIStackAlignInBytes;
}

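// Example (illustrative): on Darwin, a struct containing a 128-bit SSE
// vector (alignment 16) gets a 16-byte stack alignment, while any type
// whose alignment is already <= 4 returns 0 and uses the default 4-byte
// stack slots.
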
ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
                                            CCState &State) const {
  if (!ByVal) {
    if (State.FreeRegs) {
      --State.FreeRegs; // Non-byval indirects just use one pointer.
      if (!IsMCUABI)
        return getNaturalAlignIndirectInReg(Ty);
    }
    return getNaturalAlignIndirect(Ty, false);
  }

  // Compute the byval alignment.
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
  if (StackAlign == 0)
    return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true);

  // If the stack alignment is less than the type alignment, realign the
  // argument.
  bool Realign = TypeAlign > StackAlign;
  return ABIArgInfo::getIndirect(CharUnits::fromQuantity(StackAlign),
                                 /*ByVal=*/true, Realign);
}

X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
  const Type *T = isSingleElementStruct(Ty, getContext());
  if (!T)
    T = Ty.getTypePtr();

  if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
    BuiltinType::Kind K = BT->getKind();
    if (K == BuiltinType::Float || K == BuiltinType::Double)
      return Float;
  }
  return Integer;
}

bool X86_32ABIInfo::updateFreeRegs(QualType Ty, CCState &State) const {
  if (!IsSoftFloatABI) {
    Class C = classify(Ty);
    if (C == Float)
      return false;
  }

  unsigned Size = getContext().getTypeSize(Ty);
  unsigned SizeInRegs = (Size + 31) / 32;

  if (SizeInRegs == 0)
    return false;

  if (!IsMCUABI) {
    if (SizeInRegs > State.FreeRegs) {
      State.FreeRegs = 0;
      return false;
    }
  } else {
    // The MCU psABI allows passing parameters in-reg even if there are
    // earlier parameters that are passed on the stack. Also,
    // it does not allow passing >8-byte structs in-register,
    // even if there are 3 free registers available.
    if (SizeInRegs > State.FreeRegs || SizeInRegs > 2)
      return false;
  }

  State.FreeRegs -= SizeInRegs;
  return true;
}

bool X86_32ABIInfo::shouldAggregateUseDirect(QualType Ty, CCState &State,
                                             bool &InReg,
                                             bool &NeedsPadding) const {
  // On Windows, aggregates other than HFAs are never passed in registers, and
  // they do not consume register slots. Homogeneous floating-point aggregates
  // (HFAs) have already been dealt with at this point.
  if (IsWin32StructABI && isAggregateTypeForABI(Ty))
    return false;

  NeedsPadding = false;
  InReg = !IsMCUABI;

  if (!updateFreeRegs(Ty, State))
    return false;

  if (IsMCUABI)
    return true;

  if (State.CC == llvm::CallingConv::X86_FastCall ||
      State.CC == llvm::CallingConv::X86_VectorCall ||
      State.CC == llvm::CallingConv::X86_RegCall) {
    if (getContext().getTypeSize(Ty) <= 32 && State.FreeRegs)
      NeedsPadding = true;

    return false;
  }

  return true;
}

bool X86_32ABIInfo::shouldPrimitiveUseInReg(QualType Ty, CCState &State) const {
  if (!updateFreeRegs(Ty, State))
    return false;

  if (IsMCUABI)
    return true;

  if (State.CC == llvm::CallingConv::X86_FastCall ||
      State.CC == llvm::CallingConv::X86_VectorCall ||
      State.CC == llvm::CallingConv::X86_RegCall) {
    if (getContext().getTypeSize(Ty) > 32)
      return false;

    return (Ty->isIntegralOrEnumerationType() || Ty->isPointerType() ||
        Ty->isReferenceType());
  }

  return true;
}

ABIArgInfo
X86_32ABIInfo::reclassifyHvaArgType(QualType Ty, CCState &State,
                                    const ABIArgInfo &current) const {
  // Assumes vectorCall calling convention.
  const Type *Base = nullptr;
  uint64_t NumElts = 0;

  if (!Ty->isBuiltinType() && !Ty->isVectorType() &&
      isHomogeneousAggregate(Ty, Base, NumElts)) {
    if (State.FreeSSERegs >= NumElts) {
      // HVA types get passed directly in registers if there is room.
      State.FreeSSERegs -= NumElts;
      return getDirectX86Hva();
    } else {
      // If there's no room, the HVA gets passed as normal indirect
      // structure.
      return getIndirectResult(Ty, /*ByVal=*/false, State);
    }
  }
  return current;
}

ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
                                               CCState &State) const {
  // FIXME: Set alignment on indirect arguments.

  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Check with the C++ ABI first.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (RT) {
    CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
    if (RAA == CGCXXABI::RAA_Indirect) {
      return getIndirectResult(Ty, false, State);
    } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
      // The field index doesn't matter, we'll fix it up later.
      return ABIArgInfo::getInAlloca(/*FieldIndex=*/0);
    }
  }

  // vectorcall adds the concept of a homogeneous vector aggregate, similar
  // to other targets; regcall uses some of the HVA rules.
  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  if ((State.CC == llvm::CallingConv::X86_VectorCall ||
       State.CC == llvm::CallingConv::X86_RegCall) &&
      isHomogeneousAggregate(Ty, Base, NumElts)) {

    if (State.CC == llvm::CallingConv::X86_RegCall) {
      if (State.FreeSSERegs >= NumElts) {
        State.FreeSSERegs -= NumElts;
        if (Ty->isBuiltinType() || Ty->isVectorType())
          return ABIArgInfo::getDirect();
        return ABIArgInfo::getExpand();
      }
      return getIndirectResult(Ty, /*ByVal=*/false, State);
    } else if (State.CC == llvm::CallingConv::X86_VectorCall) {
      if (State.FreeSSERegs >= NumElts &&
          (Ty->isBuiltinType() || Ty->isVectorType())) {
        // Actual floating-point types get registers first time through if
        // there are registers available.
        State.FreeSSERegs -= NumElts;
        return ABIArgInfo::getDirect();
      } else if (!Ty->isBuiltinType() && !Ty->isVectorType()) {
        // HVA types only get registers after everything else has been
        // assigned, so mark them indirect for now.
        return ABIArgInfo::getIndirect(getContext().getTypeAlignInChars(Ty));
      }
    }
  }

  if (isAggregateTypeForABI(Ty)) {
    // Structures with flexible arrays are always indirect.
    // FIXME: This should not be byval!
    if (RT && RT->getDecl()->hasFlexibleArrayMember())
      return getIndirectResult(Ty, true, State);

    // Ignore empty structs/unions on non-Windows.
    if (!IsWin32StructABI && isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();

    llvm::LLVMContext &LLVMContext = getVMContext();
    llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
    bool NeedsPadding = false;
    bool InReg;
    if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
      unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
      SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32);
      llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
      if (InReg)
        return ABIArgInfo::getDirectInReg(Result);
      else
        return ABIArgInfo::getDirect(Result);
    }
    llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;

    // Expand small (<= 128-bit) record types when we know that the stack layout
    // of those arguments will match the struct. This is important because the
    // LLVM backend isn't smart enough to remove byval, which inhibits many
    // optimizations.
    // Don't do this for the MCU if there are still free integer registers
    // (see X86_64 ABI for full explanation).
    if (getContext().getTypeSize(Ty) <= 4 * 32 &&
        (!IsMCUABI || State.FreeRegs == 0) && canExpandIndirectArgument(Ty))
      return ABIArgInfo::getExpandWithPadding(
          State.CC == llvm::CallingConv::X86_FastCall ||
              State.CC == llvm::CallingConv::X86_VectorCall ||
              State.CC == llvm::CallingConv::X86_RegCall,
          PaddingType);

    return getIndirectResult(Ty, true, State);
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // On Darwin, some vectors are passed in memory, we handle this by passing
    // it as an i8/i16/i32/i64.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(Ty);
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));
    }

    if (IsX86_MMXType(CGT.ConvertType(Ty)))
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64));

    return ABIArgInfo::getDirect();
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  bool InReg = shouldPrimitiveUseInReg(Ty, State);

  if (Ty->isPromotableIntegerType()) {
    if (InReg)
      return ABIArgInfo::getExtendInReg();
    return ABIArgInfo::getExtend();
  }

  if (InReg)
    return ABIArgInfo::getDirectInReg();
  return ABIArgInfo::getDirect();
}

void X86_32ABIInfo::computeVectorCallArgs(CGFunctionInfo &FI, CCState &State,
                                          bool &UsedInAlloca) const {
  // Vectorcall only allows the first 6 parameters to be passed in registers,
  // and homogeneous vector aggregates are only put into registers as a second
  // priority.
  unsigned Count = 0;
  CCState ZeroState = State;
  ZeroState.FreeRegs = ZeroState.FreeSSERegs = 0;
  // HVAs must be done as a second priority for registers, so the deferred
  // items are dealt with by going through the pattern a second time.
  for (auto &I : FI.arguments()) {
    if (Count < VectorcallMaxParamNumAsReg)
      I.info = classifyArgumentType(I.type, State);
    else
      // Parameters after the 6th cannot be passed in registers,
      // so pretend there are no registers left for them.
      I.info = classifyArgumentType(I.type, ZeroState);
    UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca);
    ++Count;
  }
  Count = 0;
  // Go through the arguments a second time to get HVAs registers if there
  // are still some available.
  for (auto &I : FI.arguments()) {
    if (Count < VectorcallMaxParamNumAsReg)
      I.info = reclassifyHvaArgType(I.type, State, I.info);
    ++Count;
  }
}

void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  CCState State(FI.getCallingConvention());
  if (IsMCUABI)
    State.FreeRegs = 3;
  else if (State.CC == llvm::CallingConv::X86_FastCall)
    State.FreeRegs = 2;
  else if (State.CC == llvm::CallingConv::X86_VectorCall) {
    State.FreeRegs = 2;
    State.FreeSSERegs = 6;
  } else if (FI.getHasRegParm())
    State.FreeRegs = FI.getRegParm();
  else if (State.CC == llvm::CallingConv::X86_RegCall) {
    State.FreeRegs = 5;
    State.FreeSSERegs = 8;
  } else
    State.FreeRegs = DefaultNumRegisterParameters;

  if (!getCXXABI().classifyReturnType(FI)) {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), State);
  } else if (FI.getReturnInfo().isIndirect()) {
    // The C++ ABI is not aware of register usage, so we have to check if the
    // return value was sret and put it in a register ourselves if appropriate.
    if (State.FreeRegs) {
      --State.FreeRegs; // The sret parameter consumes a register.
      if (!IsMCUABI)
        FI.getReturnInfo().setInReg(true);
    }
  }

  // The chain argument effectively gives us another free register.
  if (FI.isChainCall())
    ++State.FreeRegs;

  bool UsedInAlloca = false;
  if (State.CC == llvm::CallingConv::X86_VectorCall) {
    computeVectorCallArgs(FI, State, UsedInAlloca);
  } else {
    // If not vectorcall, revert to normal behavior.
    for (auto &I : FI.arguments()) {
      I.info = classifyArgumentType(I.type, State);
      UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca);
    }
  }

  // If we needed to use inalloca for any argument, do a second pass and rewrite
  // all the memory arguments to use inalloca.
  if (UsedInAlloca)
    rewriteWithInAlloca(FI);
}

1741 X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
1742 CharUnits &StackOffset, ABIArgInfo &Info,
1743 QualType Type) const {
1744 // Arguments are always 4-byte-aligned.
1745 CharUnits FieldAlign = CharUnits::fromQuantity(4);
1747 assert(StackOffset.isMultipleOf(FieldAlign) && "unaligned inalloca struct");
1748 Info = ABIArgInfo::getInAlloca(FrameFields.size());
1749 FrameFields.push_back(CGT.ConvertTypeForMem(Type));
1750 StackOffset += getContext().getTypeSizeInChars(Type);
1752 // Insert padding bytes to respect alignment.
1753 CharUnits FieldEnd = StackOffset;
1754 StackOffset = FieldEnd.alignTo(FieldAlign);
1755 if (StackOffset != FieldEnd) {
1756 CharUnits NumBytes = StackOffset - FieldEnd;
1757 llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
1758 Ty = llvm::ArrayType::get(Ty, NumBytes.getQuantity());
1759 FrameFields.push_back(Ty);
1760 }
1761 }
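// Worked example (added for exposition): appending a 1-byte argument at
// StackOffset 4 advances the offset to 5; alignTo(5, 4) == 8, so a
// [3 x i8] padding field is emitted and the next field starts at offset 8.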
1763 static bool isArgInAlloca(const ABIArgInfo &Info) {
1764 // Leave ignored and inreg arguments alone.
1765 switch (Info.getKind()) {
1766 case ABIArgInfo::InAlloca:
1767 return true;
1768 case ABIArgInfo::Indirect:
1769 assert(Info.getIndirectByVal());
1770 return true;
1771 case ABIArgInfo::Ignore:
1772 return false;
1773 case ABIArgInfo::Direct:
1774 case ABIArgInfo::Extend:
1775 if (Info.getInReg())
1776 return false;
1777 return true;
1778 case ABIArgInfo::Expand:
1779 case ABIArgInfo::CoerceAndExpand:
1780 // These are aggregate types which are never passed in registers when
1781 // inalloca is involved.
1782 return true;
1783 }
1784 llvm_unreachable("invalid enum");
1787 void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
1788 assert(IsWin32StructABI && "inalloca only supported on win32");
1790 // Build a packed struct type for all of the arguments in memory.
1791 SmallVector<llvm::Type *, 6> FrameFields;
1793 // The stack alignment is always 4.
1794 CharUnits StackAlign = CharUnits::fromQuantity(4);
1796 CharUnits StackOffset;
1797 CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end();
1799 // Put 'this' into the struct before 'sret', if necessary.
1800 bool IsThisCall =
1801 FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall;
1802 ABIArgInfo &Ret = FI.getReturnInfo();
1803 if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall &&
1804 isArgInAlloca(I->info)) {
1805 addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
1806 ++I;
1807 }
1809 // Put the sret parameter into the inalloca struct if it's in memory.
1810 if (Ret.isIndirect() && !Ret.getInReg()) {
1811 CanQualType PtrTy = getContext().getPointerType(FI.getReturnType());
1812 addFieldToArgStruct(FrameFields, StackOffset, Ret, PtrTy);
1813 // On Windows, the hidden sret parameter is always returned in eax.
1814 Ret.setInAllocaSRet(IsWin32StructABI);
1815 }
1817 // Skip the 'this' parameter in ecx.
1818 if (IsThisCall)
1819 ++I;
1821 // Put arguments passed in memory into the struct.
1822 for (; I != E; ++I) {
1823 if (isArgInAlloca(I->info))
1824 addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
1827 FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
1828 /*isPacked=*/true),
1829 StackAlign);
1830 }
1832 Address X86_32ABIInfo::EmitVAArg(CodeGenFunction &CGF,
1833 Address VAListAddr, QualType Ty) const {
1835 auto TypeInfo = getContext().getTypeInfoInChars(Ty);
1837 // x86-32 changes the alignment of certain arguments on the stack.
1839 // Just messing with TypeInfo like this works because we never pass
1840 // anything indirectly.
1841 TypeInfo.second = CharUnits::fromQuantity(
1842 getTypeStackAlignInBytes(Ty, TypeInfo.second.getQuantity()));
1844 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
1845 TypeInfo, CharUnits::fromQuantity(4),
1846 /*AllowHigherAlign*/ true);
1847 }
1849 bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
1850 const llvm::Triple &Triple, const CodeGenOptions &Opts) {
1851 assert(Triple.getArch() == llvm::Triple::x86);
1853 switch (Opts.getStructReturnConvention()) {
1854 case CodeGenOptions::SRCK_Default:
1855 break;
1856 case CodeGenOptions::SRCK_OnStack: // -fpcc-struct-return
1857 return false;
1858 case CodeGenOptions::SRCK_InRegs: // -freg-struct-return
1859 return true;
1860 }
1862 if (Triple.isOSDarwin() || Triple.isOSIAMCU())
1863 return true;
1865 switch (Triple.getOS()) {
1866 case llvm::Triple::DragonFly:
1867 case llvm::Triple::FreeBSD:
1868 case llvm::Triple::OpenBSD:
1869 case llvm::Triple::Bitrig:
1870 case llvm::Triple::Win32:
1871 return true;
1872 default:
1873 return false;
1874 }
1875 }
1877 void X86_32TargetCodeGenInfo::setTargetAttributes(const Decl *D,
1878 llvm::GlobalValue *GV,
1879 CodeGen::CodeGenModule &CGM) const {
1880 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
1881 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
1882 // Get the LLVM function.
1883 llvm::Function *Fn = cast<llvm::Function>(GV);
1885 // Now add the 'alignstack' attribute with a value of 16.
1886 llvm::AttrBuilder B;
1887 B.addStackAlignmentAttr(16);
1888 Fn->addAttributes(llvm::AttributeSet::FunctionIndex,
1889 llvm::AttributeSet::get(CGM.getLLVMContext(),
1890 llvm::AttributeSet::FunctionIndex,
1891 B));
1892 }
1893 if (FD->hasAttr<AnyX86InterruptAttr>()) {
1894 llvm::Function *Fn = cast<llvm::Function>(GV);
1895 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
1900 bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
1901 CodeGen::CodeGenFunction &CGF,
1902 llvm::Value *Address) const {
1903 CodeGen::CGBuilderTy &Builder = CGF.Builder;
1905 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
1907 // 0-7 are the eight integer registers; the order is different
1908 // on Darwin (for EH), but the range is the same.
1909 // 8 is %eip.
1910 AssignToArrayRange(Builder, Address, Four8, 0, 8);
1912 if (CGF.CGM.getTarget().getTriple().isOSDarwin()) {
1913 // 12-16 are st(0..4). Not sure why we stop at 4.
1914 // These have size 16, which is sizeof(long double) on
1915 // platforms with 8-byte alignment for that type.
1916 llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
1917 AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);
1918 } else {
1920 // 9 is %eflags, which doesn't get a size on Darwin for some
1921 // reason.
1922 Builder.CreateAlignedStore(
1923 Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9),
1924 CharUnits::One());
1926 // 11-16 are st(0..5). Not sure why we stop at 5.
1927 // These have size 12, which is sizeof(long double) on
1928 // platforms with 4-byte alignment for that type.
1929 llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
1930 AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
1931 }
1933 return false;
1934 }
1936 //===----------------------------------------------------------------------===//
1937 // X86-64 ABI Implementation
1938 //===----------------------------------------------------------------------===//
1942 /// The AVX ABI level for X86 targets.
1943 enum class X86AVXABILevel {
1944 None,
1945 AVX,
1946 AVX512
1947 };
1949 /// \returns the size in bits of the largest (native) vector for \p AVXLevel.
1950 static unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) {
1951 switch (AVXLevel) {
1952 case X86AVXABILevel::AVX512:
1953 return 512;
1954 case X86AVXABILevel::AVX:
1955 return 256;
1956 case X86AVXABILevel::None:
1957 return 128;
1958 }
1959 llvm_unreachable("Unknown AVXLevel");
1960 }
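// Illustration (editorial note): under X86AVXABILevel::AVX the largest
// native vector is 256 bits, so a named <8 x float> argument can stay in a
// YMM register, while under X86AVXABILevel::None the same vector exceeds
// the 128-bit limit and is treated as an illegal vector type (see
// IsIllegalVectorType below).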
1962 /// X86_64ABIInfo - The X86_64 ABI information.
1963 class X86_64ABIInfo : public SwiftABIInfo {
1964 enum Class {
1965 Integer = 0,
1966 SSE,
1967 SSEUp,
1968 X87,
1969 X87Up,
1970 ComplexX87,
1971 NoClass,
1972 Memory
1973 };
1975 /// merge - Implement the X86_64 ABI merging algorithm.
1977 /// Merge an accumulating classification \arg Accum with a field
1978 /// classification \arg Field.
1980 /// \param Accum - The accumulating classification. This should
1981 /// always be either NoClass or the result of a previous merge
1982 /// call. In addition, this should never be Memory (the caller
1983 /// should just return Memory for the aggregate).
1984 static Class merge(Class Accum, Class Field);
1986 /// postMerge - Implement the X86_64 ABI post merging algorithm.
1988 /// Post merger cleanup, reduces a malformed Hi and Lo pair to
1989 /// final MEMORY or SSE classes when necessary.
1991 /// \param AggregateSize - The size of the current aggregate in
1992 /// the classification process.
1994 /// \param Lo - The classification for the parts of the type
1995 /// residing in the low word of the containing object.
1997 /// \param Hi - The classification for the parts of the type
1998 /// residing in the higher words of the containing object.
2000 void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;
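/// Informal example (added here for exposition): union { __m128 v; long l; }
/// first classifies as (Integer, SSEUp); the post-merge cleanup converts the
/// SSEUp to SSE, so the union is passed in one GPR plus one XMM register.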
2002 /// classify - Determine the x86_64 register classes in which the
2003 /// given type T should be passed.
2005 /// \param Lo - The classification for the parts of the type
2006 /// residing in the low word of the containing object.
2008 /// \param Hi - The classification for the parts of the type
2009 /// residing in the high word of the containing object.
2011 /// \param OffsetBase - The bit offset of this type in the
2012 /// containing object. Some parameters are classified different
2013 /// depending on whether they straddle an eightbyte boundary.
2015 /// \param isNamedArg - Whether the argument in question is a "named"
2016 /// argument, as used in AMD64-ABI 3.5.7.
2018 /// If a word is unused its result will be NoClass; if a type should
2019 /// be passed in Memory then at least the classification of \arg Lo
2020 /// will be Memory.
2022 /// The \arg Lo class will be NoClass iff the argument is ignored.
2024 /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
2025 /// also be ComplexX87.
2026 void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
2027 bool isNamedArg) const;
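/// As an informal example of straddling (added for exposition): a <2 x int>
/// vector member at bit offset 32 spans bits [32, 96) and so crosses the
/// first eightbyte boundary; both halves are then classified as INTEGER.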
2029 llvm::Type *GetByteVectorType(QualType Ty) const;
2030 llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
2031 unsigned IROffset, QualType SourceTy,
2032 unsigned SourceOffset) const;
2033 llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
2034 unsigned IROffset, QualType SourceTy,
2035 unsigned SourceOffset) const;
2037 /// getIndirectReturnResult - Given a source type \arg Ty, return a suitable
2038 /// result such that the value will be returned in memory.
2039 ABIArgInfo getIndirectReturnResult(QualType Ty) const;
2041 /// getIndirectResult - Given a source type \arg Ty, return a suitable result
2042 /// such that the argument will be passed in memory.
2044 /// \param freeIntRegs - The number of free integer registers remaining
2045 /// available.
2046 ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;
2048 ABIArgInfo classifyReturnType(QualType RetTy) const;
2050 ABIArgInfo classifyArgumentType(QualType Ty, unsigned freeIntRegs,
2051 unsigned &neededInt, unsigned &neededSSE,
2052 bool isNamedArg) const;
2054 ABIArgInfo classifyRegCallStructType(QualType Ty, unsigned &NeededInt,
2055 unsigned &NeededSSE) const;
2057 ABIArgInfo classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
2058 unsigned &NeededSSE) const;
2060 bool IsIllegalVectorType(QualType Ty) const;
2062 /// The 0.98 ABI revision clarified a lot of ambiguities,
2063 /// unfortunately in ways that were not always consistent with
2064 /// certain previous compilers. In particular, platforms which
2065 /// required strict binary compatibility with older versions of GCC
2066 /// may need to exempt themselves.
2067 bool honorsRevision0_98() const {
2068 return !getTarget().getTriple().isOSDarwin();
2071 /// GCC classifies <1 x long long> as SSE but compatibility with older clang
2072 /// compilers requires us to classify it as INTEGER.
2073 bool classifyIntegerMMXAsSSE() const {
2074 const llvm::Triple &Triple = getTarget().getTriple();
2075 if (Triple.isOSDarwin() || Triple.getOS() == llvm::Triple::PS4)
2076 return false;
2077 if (Triple.isOSFreeBSD() && Triple.getOSMajorVersion() >= 10)
2078 return false;
2079 return true;
2080 }
2082 X86AVXABILevel AVXLevel;
2083 // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on
2084 // 64-bit hardware.
2085 bool Has64BitPointers;
2088 X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel) :
2089 SwiftABIInfo(CGT), AVXLevel(AVXLevel),
2090 Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
2093 bool isPassedUsingAVXType(QualType type) const {
2094 unsigned neededInt, neededSSE;
2095 // The freeIntRegs argument doesn't matter here.
2096 ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE,
2097 /*isNamedArg*/true);
2098 if (info.isDirect()) {
2099 llvm::Type *ty = info.getCoerceToType();
2100 if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
2101 return (vectorTy->getBitWidth() > 128);
2102 }
2103 return false;
2104 }
2106 void computeInfo(CGFunctionInfo &FI) const override;
2108 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
2109 QualType Ty) const override;
2110 Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
2111 QualType Ty) const override;
2113 bool has64BitPointers() const {
2114 return Has64BitPointers;
2117 bool shouldPassIndirectlyForSwift(CharUnits totalSize,
2118 ArrayRef<llvm::Type*> scalars,
2119 bool asReturnValue) const override {
2120 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
2121 }
2122 bool isSwiftErrorInRegister() const override {
2123 return true;
2124 }
2125 };
2127 /// WinX86_64ABIInfo - The Windows X86_64 ABI information.
2128 class WinX86_64ABIInfo : public SwiftABIInfo {
2130 WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT)
2131 : SwiftABIInfo(CGT),
2132 IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {}
2134 void computeInfo(CGFunctionInfo &FI) const override;
2136 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
2137 QualType Ty) const override;
2139 bool isHomogeneousAggregateBaseType(QualType Ty) const override {
2140 // FIXME: Assumes vectorcall is in use.
2141 return isX86VectorTypeForVectorCall(getContext(), Ty);
2144 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
2145 uint64_t NumMembers) const override {
2146 // FIXME: Assumes vectorcall is in use.
2147 return isX86VectorCallAggregateSmallEnough(NumMembers);
2150 bool shouldPassIndirectlyForSwift(CharUnits totalSize,
2151 ArrayRef<llvm::Type *> scalars,
2152 bool asReturnValue) const override {
2153 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
2154 }
2156 bool isSwiftErrorInRegister() const override {
2157 return true;
2158 }
2160 private:
2161 ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs, bool IsReturnType,
2162 bool IsVectorCall, bool IsRegCall) const;
2163 ABIArgInfo reclassifyHvaArgType(QualType Ty, unsigned &FreeSSERegs,
2164 const ABIArgInfo &current) const;
2165 void computeVectorCallArgs(CGFunctionInfo &FI, unsigned FreeSSERegs,
2166 bool IsVectorCall, bool IsRegCall) const;
2168 bool IsMingw64;
2169 };
2171 class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
2173 X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
2174 : TargetCodeGenInfo(new X86_64ABIInfo(CGT, AVXLevel)) {}
2176 const X86_64ABIInfo &getABIInfo() const {
2177 return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
2180 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
2181 return 7;
2182 }
2184 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2185 llvm::Value *Address) const override {
2186 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
2188 // 0-15 are the 16 integer registers.
2189 // 16 is %rip.
2190 AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
2191 return false;
2192 }
2194 llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
2195 StringRef Constraint,
2196 llvm::Type* Ty) const override {
2197 return X86AdjustInlineAsmType(CGF, Constraint, Ty);
2200 bool isNoProtoCallVariadic(const CallArgList &args,
2201 const FunctionNoProtoType *fnType) const override {
2202 // The default CC on x86-64 sets %al to the number of SSE
2203 // registers used, and GCC sets this when calling an unprototyped
2204 // function, so we override the default behavior. However, don't do
2205 // that when AVX types are involved: the ABI explicitly states it is
2206 // undefined, and it doesn't work in practice because of how the ABI
2207 // defines varargs anyway.
2208 if (fnType->getCallConv() == CC_C) {
2209 bool HasAVXType = false;
2210 for (CallArgList::const_iterator
2211 it = args.begin(), ie = args.end(); it != ie; ++it) {
2212 if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
2213 HasAVXType = true;
2214 break;
2215 }
2216 }
2218 if (!HasAVXType)
2219 return true;
2220 }
2222 return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
2223 }
2225 llvm::Constant *
2226 getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
2227 unsigned Sig;
2228 if (getABIInfo().has64BitPointers())
2229 Sig = (0xeb << 0) | // jmp rel8
2230 (0x0a << 8) | // .+0x0c
2231 ('F' << 16) |
2232 ('T' << 24);
2233 else
2234 Sig = (0xeb << 0) | // jmp rel8
2235 (0x06 << 8) | // .+0x08
2236 ('F' << 16) |
2237 ('T' << 24);
2238 return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
2241 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2242 CodeGen::CodeGenModule &CGM) const override {
2243 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2244 if (FD->hasAttr<AnyX86InterruptAttr>()) {
2245 llvm::Function *Fn = cast<llvm::Function>(GV);
2246 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
2252 class PS4TargetCodeGenInfo : public X86_64TargetCodeGenInfo {
2254 PS4TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
2255 : X86_64TargetCodeGenInfo(CGT, AVXLevel) {}
2257 void getDependentLibraryOption(llvm::StringRef Lib,
2258 llvm::SmallString<24> &Opt) const override {
2259 Opt = "\1";
2260 // If the argument contains a space, enclose it in quotes.
2261 if (Lib.find(" ") != StringRef::npos)
2262 Opt += "\"" + Lib.str() + "\"";
2263 else
2264 Opt += Lib;
2265 }
2268 static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
2269 // If the argument does not end in .lib, automatically add the suffix.
2270 // If the argument contains a space, enclose it in quotes.
2271 // This matches the behavior of MSVC.
2272 bool Quote = (Lib.find(" ") != StringRef::npos);
2273 std::string ArgStr = Quote ? "\"" : "";
2274 ArgStr += Lib;
2275 if (!Lib.endswith_lower(".lib"))
2276 ArgStr += ".lib";
2277 ArgStr += Quote ? "\"" : "";
2278 return ArgStr;
2279 }
2281 class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo {
2283 WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
2284 bool DarwinVectorABI, bool RetSmallStructInRegABI, bool Win32StructABI,
2285 unsigned NumRegisterParameters)
2286 : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI,
2287 Win32StructABI, NumRegisterParameters, false) {}
2289 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2290 CodeGen::CodeGenModule &CGM) const override;
2292 void getDependentLibraryOption(llvm::StringRef Lib,
2293 llvm::SmallString<24> &Opt) const override {
2294 Opt = "/DEFAULTLIB:";
2295 Opt += qualifyWindowsLibrary(Lib);
2298 void getDetectMismatchOption(llvm::StringRef Name,
2299 llvm::StringRef Value,
2300 llvm::SmallString<32> &Opt) const override {
2301 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
2305 static void addStackProbeSizeTargetAttribute(const Decl *D,
2306 llvm::GlobalValue *GV,
2307 CodeGen::CodeGenModule &CGM) {
2308 if (D && isa<FunctionDecl>(D)) {
2309 if (CGM.getCodeGenOpts().StackProbeSize != 4096) {
2310 llvm::Function *Fn = cast<llvm::Function>(GV);
2312 Fn->addFnAttr("stack-probe-size",
2313 llvm::utostr(CGM.getCodeGenOpts().StackProbeSize));
2318 void WinX86_32TargetCodeGenInfo::setTargetAttributes(const Decl *D,
2319 llvm::GlobalValue *GV,
2320 CodeGen::CodeGenModule &CGM) const {
2321 X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
2323 addStackProbeSizeTargetAttribute(D, GV, CGM);
2326 class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
2328 WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
2329 X86AVXABILevel AVXLevel)
2330 : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {}
2332 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2333 CodeGen::CodeGenModule &CGM) const override;
2335 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
2336 return 7;
2337 }
2339 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2340 llvm::Value *Address) const override {
2341 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
2343 // 0-15 are the 16 integer registers.
2344 // 16 is %rip.
2345 AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
2346 return false;
2347 }
2349 void getDependentLibraryOption(llvm::StringRef Lib,
2350 llvm::SmallString<24> &Opt) const override {
2351 Opt = "/DEFAULTLIB:";
2352 Opt += qualifyWindowsLibrary(Lib);
2355 void getDetectMismatchOption(llvm::StringRef Name,
2356 llvm::StringRef Value,
2357 llvm::SmallString<32> &Opt) const override {
2358 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
2362 void WinX86_64TargetCodeGenInfo::setTargetAttributes(const Decl *D,
2363 llvm::GlobalValue *GV,
2364 CodeGen::CodeGenModule &CGM) const {
2365 TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
2367 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2368 if (FD->hasAttr<AnyX86InterruptAttr>()) {
2369 llvm::Function *Fn = cast<llvm::Function>(GV);
2370 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
2374 addStackProbeSizeTargetAttribute(D, GV, CGM);
2378 void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
2379 Class &Hi) const {
2380 // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
2382 // (a) If one of the classes is Memory, the whole argument is passed in
2383 // memory.
2385 // (b) If X87UP is not preceded by X87, the whole argument is passed in
2386 // memory.
2388 // (c) If the size of the aggregate exceeds two eightbytes and the first
2389 // eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
2390 // argument is passed in memory. NOTE: This is necessary to keep the
2391 // ABI working for processors that don't support the __m256 type.
2393 // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
2395 // Some of these are enforced by the merging logic. Others can arise
2396 // only with unions; for example:
2397 // union { _Complex double; unsigned; }
2399 // Note that clauses (b) and (c) were added in 0.98.
2401 if (Hi == Memory)
2402 Lo = Memory;
2403 if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
2404 Lo = Memory;
2405 if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
2406 Lo = Memory;
2407 if (Hi == SSEUp && Lo != SSE)
2408 Hi = SSE;
2409 }
2411 X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
2412 // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
2413 // classified recursively so that always two fields are
2414 // considered. The resulting class is calculated according to
2415 // the classes of the fields in the eightbyte:
2417 // (a) If both classes are equal, this is the resulting class.
2419 // (b) If one of the classes is NO_CLASS, the resulting class is
2420 // the other class.
2422 // (c) If one of the classes is MEMORY, the result is the MEMORY
2423 // class.
2425 // (d) If one of the classes is INTEGER, the result is the
2426 // INTEGER.
2428 // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
2429 // MEMORY is used as class.
2431 // (f) Otherwise class SSE is used.
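// Worked example (not part of the original comment): for the single
// eightbyte of struct { int a; float b; }, merging the INTEGER class of 'a'
// with the SSE class of 'b' yields INTEGER by rule (d), so the whole struct
// is passed in one GPR.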
2433 // Accum should never be memory (we should have returned) or
2434 // ComplexX87 (because this cannot be passed in a structure).
2435 assert((Accum != Memory && Accum != ComplexX87) &&
2436 "Invalid accumulated classification during merge.");
2437 if (Accum == Field || Field == NoClass)
2438 return Accum;
2439 if (Field == Memory)
2440 return Memory;
2441 if (Accum == NoClass)
2442 return Field;
2443 if (Accum == Integer || Field == Integer)
2444 return Integer;
2445 if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
2446 Accum == X87 || Accum == X87Up)
2447 return Memory;
2449 return SSE;
2450 }
2451 void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
2452 Class &Lo, Class &Hi, bool isNamedArg) const {
2453 // FIXME: This code can be simplified by introducing a simple value class for
2454 // Class pairs with appropriate constructor methods for the various
2455 // situations.
2457 // FIXME: Some of the split computations are wrong; unaligned vectors
2458 // shouldn't be passed in registers for example, so there is no chance they
2459 // can straddle an eightbyte. Verify & simplify.
2461 Lo = Hi = NoClass;
2463 Class &Current = OffsetBase < 64 ? Lo : Hi;
2464 Current = Memory;
2466 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
2467 BuiltinType::Kind k = BT->getKind();
2469 if (k == BuiltinType::Void) {
2470 Current = NoClass;
2471 } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
2472 Lo = Integer;
2473 Hi = Integer;
2474 } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
2475 Current = Integer;
2476 } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
2477 Current = SSE;
2478 } else if (k == BuiltinType::LongDouble) {
2479 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
2480 if (LDF == &llvm::APFloat::IEEEquad()) {
2481 Lo = SSE;
2482 Hi = SSEUp;
2483 } else if (LDF == &llvm::APFloat::x87DoubleExtended()) {
2484 Lo = X87;
2485 Hi = X87Up;
2486 } else if (LDF == &llvm::APFloat::IEEEdouble()) {
2487 Current = SSE;
2488 } else
2489 llvm_unreachable("unexpected long double representation!");
2490 }
2491 // FIXME: _Decimal32 and _Decimal64 are SSE.
2492 // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
2493 return;
2494 }
2496 if (const EnumType *ET = Ty->getAs<EnumType>()) {
2497 // Classify the underlying integer type.
2498 classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
2499 return;
2500 }
2502 if (Ty->hasPointerRepresentation()) {
2503 Current = Integer;
2504 return;
2505 }
2507 if (Ty->isMemberPointerType()) {
2508 if (Ty->isMemberFunctionPointerType()) {
2509 if (Has64BitPointers) {
2510 // If Has64BitPointers, this is an {i64, i64}, so classify both
2511 // Lo and Hi now.
2512 Lo = Hi = Integer;
2513 } else {
2514 // Otherwise, with 32-bit pointers, this is an {i32, i32}. If that
2515 // straddles an eightbyte boundary, Hi should be classified as well.
2516 uint64_t EB_FuncPtr = (OffsetBase) / 64;
2517 uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;
2518 if (EB_FuncPtr != EB_ThisAdj) {
2519 Lo = Hi = Integer;
2520 } else {
2521 Current = Integer;
2522 }
2523 }
2524 } else {
2525 Current = Integer;
2526 }
2527 return;
2528 }
2530 if (const VectorType *VT = Ty->getAs<VectorType>()) {
2531 uint64_t Size = getContext().getTypeSize(VT);
2532 if (Size == 1 || Size == 8 || Size == 16 || Size == 32) {
2533 // gcc passes the following as integer:
2534 // 4 bytes - <4 x char>, <2 x short>, <1 x int>, <1 x float>
2535 // 2 bytes - <2 x char>, <1 x short>
2536 // 1 byte - <1 x char>
2537 Current = Integer;
2539 // If this type crosses an eightbyte boundary, it should be
2540 // split.
2541 uint64_t EB_Lo = (OffsetBase) / 64;
2542 uint64_t EB_Hi = (OffsetBase + Size - 1) / 64;
2543 if (EB_Lo != EB_Hi)
2544 Hi = Lo;
2545 } else if (Size == 64) {
2546 QualType ElementType = VT->getElementType();
2548 // gcc passes <1 x double> in memory. :(
2549 if (ElementType->isSpecificBuiltinType(BuiltinType::Double))
2550 return;
2552 // gcc passes <1 x long long> as SSE but clang used to unconditionally
2553 // pass them as integer. For platforms where clang is the de facto
2554 // platform compiler, we must continue to use integer.
2555 if (!classifyIntegerMMXAsSSE() &&
2556 (ElementType->isSpecificBuiltinType(BuiltinType::LongLong) ||
2557 ElementType->isSpecificBuiltinType(BuiltinType::ULongLong) ||
2558 ElementType->isSpecificBuiltinType(BuiltinType::Long) ||
2559 ElementType->isSpecificBuiltinType(BuiltinType::ULong)))
2560 Current = Integer;
2561 else
2562 Current = SSE;
2564 // If this type crosses an eightbyte boundary, it should be
2565 // split.
2566 if (OffsetBase && OffsetBase != 64)
2567 Hi = Lo;
2568 } else if (Size == 128 ||
2569 (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) {
2570 // Arguments of 256-bits are split into four eightbyte chunks. The
2571 // least significant one belongs to class SSE and all the others to class
2572 // SSEUP. The original Lo and Hi design considers that types can't be
2573 // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense.
2574 // This design isn't correct for 256-bits, but since there are no cases
2575 // where the upper parts would need to be inspected, avoid adding
2576 // complexity and just consider Hi to match the 64-256 part.
2578 // Note that per 3.5.7 of AMD64-ABI, 256-bit args are only passed in
2579 // registers if they are "named", i.e. not part of the "..." of a
2580 // variadic function.
2582 // Similarly, per 3.2.3. of the AVX512 draft, 512-bits ("named") args are
2583 // split into eight eightbyte chunks, one SSE and seven SSEUP.
2584 Lo = SSE;
2585 Hi = SSEUp;
2586 }
2588 return;
2589 }
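// For example (editorial note): a named <8 x float> argument on an
// AVX-capable target classifies as Lo = SSE, Hi = SSEUp and is passed in a
// single YMM register rather than being split across eightbytes.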
2590 if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
2591 QualType ET = getContext().getCanonicalType(CT->getElementType());
2593 uint64_t Size = getContext().getTypeSize(Ty);
2594 if (ET->isIntegralOrEnumerationType()) {
2595 if (Size <= 64)
2596 Current = Integer;
2597 else if (Size <= 128)
2598 Lo = Hi = Integer;
2599 } else if (ET == getContext().FloatTy) {
2600 Current = SSE;
2601 } else if (ET == getContext().DoubleTy) {
2602 Lo = Hi = SSE;
2603 } else if (ET == getContext().LongDoubleTy) {
2604 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
2605 if (LDF == &llvm::APFloat::IEEEquad())
2606 Current = Memory;
2607 else if (LDF == &llvm::APFloat::x87DoubleExtended())
2608 Current = ComplexX87;
2609 else if (LDF == &llvm::APFloat::IEEEdouble())
2610 Lo = Hi = SSE;
2611 else
2612 llvm_unreachable("unexpected long double representation!");
2613 }
2615 // If this complex type crosses an eightbyte boundary then it
2616 // should be split.
2617 uint64_t EB_Real = (OffsetBase) / 64;
2618 uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
2619 if (Hi == NoClass && EB_Real != EB_Imag)
2620 Hi = Lo;
2622 return;
2623 }
2625 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
2626 // Arrays are treated like structures.
2628 uint64_t Size = getContext().getTypeSize(Ty);
2630 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
2631 // than eight eightbytes, ..., it has class MEMORY.
2632 if (Size > 512)
2633 return;
2635 // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
2636 // fields, it has class MEMORY.
2638 // Only need to check alignment of array base.
2639 if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
2640 return;
2642 // Otherwise implement simplified merge. We could be smarter about
2643 // this, but it isn't worth it and would be harder to verify.
2644 Current = NoClass;
2645 uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
2646 uint64_t ArraySize = AT->getSize().getZExtValue();
2648 // The only case a 256-bit wide vector could be used is when the array
2649 // contains a single 256-bit element. Since Lo and Hi logic isn't extended
2650 // to work for sizes wider than 128, early check and fallback to memory.
2652 if (Size > 128 &&
2653 (Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel)))
2654 return;
2656 for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
2657 Class FieldLo, FieldHi;
2658 classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);
2659 Lo = merge(Lo, FieldLo);
2660 Hi = merge(Hi, FieldHi);
2661 if (Lo == Memory || Hi == Memory)
2662 break;
2663 }
2665 postMerge(Size, Lo, Hi);
2666 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
2667 return;
2668 }
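// A small worked example (added for exposition): struct { double d[2]; }
// classifies element-by-element to Lo = SSE, Hi = SSE, so the two
// eightbytes are passed in two SSE registers (e.g. %xmm0 and %xmm1).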
2670 if (const RecordType *RT = Ty->getAs<RecordType>()) {
2671 uint64_t Size = getContext().getTypeSize(Ty);
2673 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
2674 // than eight eightbytes, ..., it has class MEMORY.
2675 if (Size > 512)
2676 return;
2678 // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
2679 // copy constructor or a non-trivial destructor, it is passed by invisible
2680 // reference.
2681 if (getRecordArgABI(RT, getCXXABI()))
2682 return;
2684 const RecordDecl *RD = RT->getDecl();
2686 // Assume variable sized types are passed in memory.
2687 if (RD->hasFlexibleArrayMember())
2688 return;
2690 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
2692 // Reset Lo class, this will be recomputed.
2693 Current = NoClass;
2695 // If this is a C++ record, classify the bases first.
2696 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
2697 for (const auto &I : CXXRD->bases()) {
2698 assert(!I.isVirtual() && !I.getType()->isDependentType() &&
2699 "Unexpected base class!");
2700 const CXXRecordDecl *Base =
2701 cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
2703 // Classify this field.
2705 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
2706 // single eightbyte, each is classified separately. Each eightbyte gets
2707 // initialized to class NO_CLASS.
2708 Class FieldLo, FieldHi;
2709 uint64_t Offset =
2710 OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
2711 classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg);
2712 Lo = merge(Lo, FieldLo);
2713 Hi = merge(Hi, FieldHi);
2714 if (Lo == Memory || Hi == Memory) {
2715 postMerge(Size, Lo, Hi);
2716 return;
2717 }
2718 }
2719 }
2721 // Classify the fields one at a time, merging the results.
2722 unsigned idx = 0;
2723 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
2724 i != e; ++i, ++idx) {
2725 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
2726 bool BitField = i->isBitField();
2728 // Ignore padding bit-fields.
2729 if (BitField && i->isUnnamedBitfield())
2730 continue;
2732 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
2733 // four eightbytes, or it contains unaligned fields, it has class MEMORY.
2735 // The only case a 256-bit wide vector could be used is when the struct
2736 // contains a single 256-bit element. Since Lo and Hi logic isn't extended
2737 // to work for sizes wider than 128, early check and fallback to memory.
2739 if (Size > 128 && (Size != getContext().getTypeSize(i->getType()) ||
2740 Size > getNativeVectorSizeForAVXABI(AVXLevel))) {
2741 Lo = Memory;
2742 postMerge(Size, Lo, Hi);
2743 return;
2744 }
2745 // Note, skip this test for bit-fields, see below.
2746 if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
2747 Lo = Memory;
2748 postMerge(Size, Lo, Hi);
2749 return;
2750 }
2752 // Classify this field.
2754 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
2755 // exceeds a single eightbyte, each is classified
2756 // separately. Each eightbyte gets initialized to class
2758 Class FieldLo, FieldHi;
2760 // Bit-fields require special handling, they do not force the
2761 // structure to be passed in memory even if unaligned, and
2762 // therefore they can straddle an eightbyte.
2763 if (BitField) {
2764 assert(!i->isUnnamedBitfield());
2765 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
2766 uint64_t Size = i->getBitWidthValue(getContext());
2768 uint64_t EB_Lo = Offset / 64;
2769 uint64_t EB_Hi = (Offset + Size - 1) / 64;
2771 if (EB_Lo) {
2772 assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
2773 FieldLo = NoClass;
2774 FieldHi = Integer;
2775 } else {
2776 FieldLo = Integer;
2777 FieldHi = EB_Hi ? Integer : NoClass;
2778 }
2779 } else
2780 classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
2781 Lo = merge(Lo, FieldLo);
2782 Hi = merge(Hi, FieldHi);
2783 if (Lo == Memory || Hi == Memory)
2784 break;
2785 }
2787 postMerge(Size, Lo, Hi);
2788 }
2789 }
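// Editorial example: struct { long x; double y; } classifies its first
// eightbyte INTEGER and its second SSE, so as an argument it occupies one
// GPR and one XMM register (e.g. %rdi and %xmm0).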
2791 ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
2792 // If this is a scalar LLVM value then assume LLVM will pass it in the right
2793 // place.
2794 if (!isAggregateTypeForABI(Ty)) {
2795 // Treat an enum type as its underlying type.
2796 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2797 Ty = EnumTy->getDecl()->getIntegerType();
2799 return (Ty->isPromotableIntegerType() ?
2800 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
2803 return getNaturalAlignIndirect(Ty);
2806 bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
2807 if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
2808 uint64_t Size = getContext().getTypeSize(VecTy);
2809 unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel);
2810 if (Size <= 64 || Size > LargestVector)
2811 return true;
2812 }
2814 return false;
2815 }
2817 ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
2818 unsigned freeIntRegs) const {
2819 // If this is a scalar LLVM value then assume LLVM will pass it in the right
2820 // place.
2822 // This assumption is optimistic, as there could be free registers available
2823 // when we need to pass this argument in memory, and LLVM could try to pass
2824 // the argument in the free register. This does not seem to happen currently,
2825 // but this code would be much safer if we could mark the argument with
2826 // 'onstack'. See PR12193.
2827 if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) {
2828 // Treat an enum type as its underlying type.
2829 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2830 Ty = EnumTy->getDecl()->getIntegerType();
2832 return (Ty->isPromotableIntegerType() ?
2833 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
2836 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
2837 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
2839 // Compute the byval alignment. We specify the alignment of the byval in all
2840 // cases so that the mid-level optimizer knows the alignment of the byval.
2841 unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);
2843 // Attempt to avoid passing indirect results using byval when possible. This
2844 // is important for good codegen.
2846 // We do this by coercing the value into a scalar type which the backend can
2847 // handle naturally (i.e., without using byval).
2849 // For simplicity, we currently only do this when we have exhausted all of the
2850 // free integer registers. Doing this when there are free integer registers
2851 // would require more care, as we would have to ensure that the coerced value
2852 // did not claim the unused register. That would require either reordering the
2853 // arguments to the function (so that any subsequent inreg values came first),
2854 // or only doing this optimization when there were no following arguments that
2855 // might be passed in registers.
2857 // We currently expect it to be rare (particularly in well written code) for
2858 // arguments to be passed on the stack when there are still free integer
2859 // registers available (this would typically imply large structs being passed
2860 // by value), so this seems like a fair tradeoff for now.
2862 // We can revisit this if the backend grows support for 'onstack' parameter
2863 // attributes. See PR12193.
2864 if (freeIntRegs == 0) {
2865 uint64_t Size = getContext().getTypeSize(Ty);
2867 // If this type fits in an eightbyte, coerce it into the matching integral
2868 // type, which will end up on the stack (with alignment 8).
2869 if (Align == 8 && Size <= 64)
2870 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
2871 Size));
2872 }
2874 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(Align));
2877 /// The ABI specifies that a value should be passed in a full vector XMM/YMM
2878 /// register. Pick an LLVM IR type that will be passed as a vector register.
2879 llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
2880 // Wrapper structs/arrays that only contain vectors are passed just like
2881 // vectors; strip them off if present.
2882 if (const Type *InnerTy = isSingleElementStruct(Ty, getContext()))
2883 Ty = QualType(InnerTy, 0);
2885 llvm::Type *IRType = CGT.ConvertType(Ty);
2886 if (isa<llvm::VectorType>(IRType) ||
2887 IRType->getTypeID() == llvm::Type::FP128TyID)
2888 return IRType;
2890 // We couldn't find the preferred IR vector type for 'Ty'.
2891 uint64_t Size = getContext().getTypeSize(Ty);
2892 assert((Size == 128 || Size == 256 || Size == 512) && "Invalid type found!");
2894 // Return a LLVM IR vector type based on the size of 'Ty'.
2895 return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()),
2896 Size / 64);
2897 }
2899 /// BitsContainNoUserData - Return true if the specified [start,end) bit range
2900 /// is known to either be off the end of the specified type or being in
2901 /// alignment padding. The user type specified is known to be at most 128 bits
2902 /// in size, and have passed through X86_64ABIInfo::classify with a successful
2903 /// classification that put one of the two halves in the INTEGER class.
2905 /// It is conservatively correct to return false.
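/// Informal example (added for exposition): in struct { double d; int i; },
/// bits [96, 128) are tail padding, so a query over that range returns true;
/// this is what allows the struct to be lowered as { double, i32 }.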
2906 static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
2907 unsigned EndBit, ASTContext &Context) {
2908 // If the bytes being queried are off the end of the type, there is no user
2909 // data hiding here. This handles analysis of builtins, vectors and other
2910 // types that don't contain interesting padding.
2911 unsigned TySize = (unsigned)Context.getTypeSize(Ty);
2912 if (TySize <= StartBit)
2913 return true;
2915 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
2916 unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
2917 unsigned NumElts = (unsigned)AT->getSize().getZExtValue();
2919 // Check each element to see if the element overlaps with the queried range.
2920 for (unsigned i = 0; i != NumElts; ++i) {
2921 // If the element is after the span we care about, then we're done..
2922 unsigned EltOffset = i*EltSize;
2923 if (EltOffset >= EndBit) break;
2925 unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0;
2926 if (!BitsContainNoUserData(AT->getElementType(), EltStart,
2927 EndBit-EltOffset, Context))
2928 return false;
2929 }
2930 // If it overlaps no elements, then it is safe to process as padding.
2931 return true;
2932 }
2934 if (const RecordType *RT = Ty->getAs<RecordType>()) {
2935 const RecordDecl *RD = RT->getDecl();
2936 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
2938 // If this is a C++ record, check the bases first.
2939 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
2940 for (const auto &I : CXXRD->bases()) {
2941 assert(!I.isVirtual() && !I.getType()->isDependentType() &&
2942 "Unexpected base class!");
2943 const CXXRecordDecl *Base =
2944 cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
2946 // If the base is after the span we care about, ignore it.
2947 unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
2948 if (BaseOffset >= EndBit) continue;
2950 unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
2951 if (!BitsContainNoUserData(I.getType(), BaseStart,
2952 EndBit-BaseOffset, Context))
2953 return false;
2954 }
2955 }
2957 // Verify that no field has data that overlaps the region of interest. Yes
2958 // this could be sped up a lot by being smarter about queried fields,
2959 // however we're only looking at structs up to 16 bytes, so we don't care.
2960 unsigned idx = 0;
2962 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
2963 i != e; ++i, ++idx) {
2964 unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);
2966 // If we found a field after the region we care about, then we're done.
2967 if (FieldOffset >= EndBit) break;
2969 unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;
2970 if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset,
2971 Context))
2972 return false;
2973 }
2975 // If nothing in this record overlapped the area of interest, then we're
2976 // clean.
2977 return true;
2978 }
2980 return false;
2981 }
2983 /// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a
2984 /// float member at the specified offset. For example, {int,{float}} has a
2985 /// float at offset 4. It is conservatively correct for this routine to return
2986 /// false.
2987 static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset,
2988 const llvm::DataLayout &TD) {
2989 // Base case if we find a float.
2990 if (IROffset == 0 && IRType->isFloatTy())
2991 return true;
2993 // If this is a struct, recurse into the field at the specified offset.
2994 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
2995 const llvm::StructLayout *SL = TD.getStructLayout(STy);
2996 unsigned Elt = SL->getElementContainingOffset(IROffset);
2997 IROffset -= SL->getElementOffset(Elt);
2998 return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD);
3001 // If this is an array, recurse into the field at the specified offset.
3002 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
3003 llvm::Type *EltTy = ATy->getElementType();
3004 unsigned EltSize = TD.getTypeAllocSize(EltTy);
3005 IROffset -= IROffset/EltSize*EltSize;
3006 return ContainsFloatAtOffset(EltTy, IROffset, TD);
3007 }
3009 return false;
3010 }
3013 /// GetSSETypeAtOffset - Return a type that will be passed by the backend in the
3014 /// low 8 bytes of an XMM register, corresponding to the SSE class.
3015 llvm::Type *X86_64ABIInfo::
3016 GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
3017 QualType SourceTy, unsigned SourceOffset) const {
3018 // The only three choices we have are either double, <2 x float>, or float. We
3019 // pass as float if the last 4 bytes are just padding. This happens for
3020 // structs that contain 3 floats.
3021 if (BitsContainNoUserData(SourceTy, SourceOffset*8+32,
3022 SourceOffset*8+64, getContext()))
3023 return llvm::Type::getFloatTy(getVMContext());
3025 // We want to pass as <2 x float> if the LLVM IR type contains a float at
3026 // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the
3027 // case.
3028 if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) &&
3029 ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout()))
3030 return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2);
3032 return llvm::Type::getDoubleTy(getVMContext());
3033 }
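// Illustration (editorial): struct { float a, b, c; } becomes
// { <2 x float>, float } - the first eightbyte holds floats at offsets 0
// and 4, while the second eightbyte's upper 32 bits are pure padding.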
3036 /// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in
3037 /// an 8-byte GPR. This means that we either have a scalar or we are talking
3038 /// about the high or low part of an up-to-16-byte struct. This routine picks
3039 /// the best LLVM IR type to represent this, which may be i64 or may be anything
3040 /// else that the backend will pass in a GPR that works better (e.g. i8, %foo*,
3043 /// PrefType is an LLVM IR type that corresponds to (part of) the IR type for
3044 /// the source type. IROffset is an offset in bytes into the LLVM IR type that
3045 /// the 8-byte value references. PrefType may be null.
3047 /// SourceTy is the source-level type for the entire argument. SourceOffset is
3048 /// an offset into this that we're processing (which is always either 0 or 8).
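/// For instance (example added here): for struct { void *p; char c; } the
/// second eightbyte is lowered to i8 rather than i64, because bits
/// [72, 128) of the source type are known to contain no user data.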
3050 llvm::Type *X86_64ABIInfo::
3051 GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
3052 QualType SourceTy, unsigned SourceOffset) const {
3053 // If we're dealing with an un-offset LLVM IR type, then it means that we're
3054 // returning an 8-byte unit starting with it. See if we can safely use it.
3055 if (IROffset == 0) {
3056 // Pointers and int64's always fill the 8-byte unit.
3057 if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
3058 IRType->isIntegerTy(64))
3059 return IRType;
3061 // If we have a 1/2/4-byte integer, we can use it only if the rest of the
3062 // goodness in the source type is just tail padding. This is allowed to
3063 // kick in for struct {double,int} on the int, but not on
3064 // struct{double,int,int} because we wouldn't return the second int. We
3065 // have to do this analysis on the source type because we can't depend on
3066 // unions being lowered a specific way etc.
3067 if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
3068 IRType->isIntegerTy(32) ||
3069 (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
3070 unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
3071 cast<llvm::IntegerType>(IRType)->getBitWidth();
3073 if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
3074 SourceOffset*8+64, getContext()))
3075 return IRType;
3076 }
3077 }
3079 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
3080 // If this is a struct, recurse into the field at the specified offset.
3081 const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
3082 if (IROffset < SL->getSizeInBytes()) {
3083 unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
3084 IROffset -= SL->getElementOffset(FieldIdx);
3086 return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
3087 SourceTy, SourceOffset);
3091 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
3092 llvm::Type *EltTy = ATy->getElementType();
3093 unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
3094 unsigned EltOffset = IROffset/EltSize*EltSize;
3095 return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
3096 SourceOffset);
3097 }
3099 // Okay, we don't have any better idea of what to pass, so we pass this in an
3100 // integer register that isn't too big to fit the rest of the struct.
3101 unsigned TySizeInBytes =
3102 (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
3104 assert(TySizeInBytes != SourceOffset && "Empty field?");
3106 // It is always safe to classify this as an integer type up to i64 that
3107 // isn't larger than the structure.
3108 return llvm::IntegerType::get(getVMContext(),
3109 std::min(TySizeInBytes-SourceOffset, 8U)*8);
3113 /// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
3114 /// be used as elements of a two register pair to pass or return, return a
3115 /// first class aggregate to represent them. For example, if the low part of
3116 /// a by-value argument should be passed as i32* and the high part as float,
3117 /// return {i32*, float}.
3119 GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
3120 const llvm::DataLayout &TD) {
3121 // In order to correctly satisfy the ABI, we need the high part to start
3122 // at offset 8. If the high and low parts we inferred are both 4-byte types
3123 // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
3124 // the second element at offset 8. Check for this:
3125 unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
3126 unsigned HiAlign = TD.getABITypeAlignment(Hi);
3127 unsigned HiStart = llvm::alignTo(LoSize, HiAlign);
3128 assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
3130 // To handle this, we have to increase the size of the low part so that the
3131 // second element will start at an 8 byte offset. We can't increase the size
3132 // of the second element because it might make us access off the end of the
3133 // struct.
3135 // There are usually two sorts of types the ABI generation code can produce
3136 // for the low part of a pair that aren't 8 bytes in size: float or
3137 // i8/i16/i32. This can also include pointers when they are 32-bit (X32 and
3139 // Promote these to a larger type.
3140 if (Lo->isFloatTy())
3141 Lo = llvm::Type::getDoubleTy(Lo->getContext());
3142 else {
3143 assert((Lo->isIntegerTy() || Lo->isPointerTy())
3144 && "Invalid/unknown lo type");
3145 Lo = llvm::Type::getInt64Ty(Lo->getContext());
3146 }
3149 llvm::StructType *Result = llvm::StructType::get(Lo, Hi, nullptr);
3152 // Verify that the second element is at an 8-byte offset.
3153 assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
3154 "Invalid x86-64 argument pair!");
3158 ABIArgInfo X86_64ABIInfo::
3159 classifyReturnType(QualType RetTy) const {
3160 // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
3161 // classification algorithm.
3162 X86_64ABIInfo::Class Lo, Hi;
3163 classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true);
3165 // Check some invariants.
3166 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
3167 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
3169 llvm::Type *ResType = nullptr;
3170 switch (Lo) {
3171 case NoClass:
3172 if (Hi == NoClass)
3173 return ABIArgInfo::getIgnore();
3174 // If the low part is just padding, it takes no register, leave ResType
3175 // null.
3176 assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
3177 "Unknown missing lo part");
3178 break;
3180 case SSEUp:
3181 case X87Up:
3182 llvm_unreachable("Invalid classification for lo word.");
3184 // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
3185 // hidden argument.
3186 case Memory:
3187 return getIndirectReturnResult(RetTy);
3189 // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
3190 // available register of the sequence %rax, %rdx is used.
3191 case Integer:
3192 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
3194 // If we have a sign or zero extended integer, make sure to return Extend
3195 // so that the parameter gets the right LLVM IR attributes.
3196 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
3197 // Treat an enum type as its underlying type.
3198 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
3199 RetTy = EnumTy->getDecl()->getIntegerType();
3201 if (RetTy->isIntegralOrEnumerationType() &&
3202 RetTy->isPromotableIntegerType())
3203 return ABIArgInfo::getExtend();
3204 }
3205 break;
3207 // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
3208 // available SSE register of the sequence %xmm0, %xmm1 is used.
3209 case SSE:
3210 ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
3211 break;
3213 // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
3214 // returned on the X87 stack in %st0 as 80-bit x87 number.
3215 case X87:
3216 ResType = llvm::Type::getX86_FP80Ty(getVMContext());
3217 break;
3219 // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
3220 // part of the value is returned in %st0 and the imaginary part in
3221 // %st1.
3222 case ComplexX87:
3223 assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
3224 ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
3225 llvm::Type::getX86_FP80Ty(getVMContext()),
3226 nullptr);
3227 break;
3228 }
3230 llvm::Type *HighPart = nullptr;
3231 switch (Hi) {
3232 // Memory was handled previously and X87 should
3233 // never occur as a hi class.
3234 case Memory:
3235 case X87:
3236 llvm_unreachable("Invalid classification for hi word.");
3238 case ComplexX87: // Previously handled.
3239 case NoClass:
3240 break;
3242 case Integer:
3243 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
3244 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
3245 return ABIArgInfo::getDirect(HighPart, 8);
3246 break;
3247 case SSE:
3248 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
3249 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
3250 return ABIArgInfo::getDirect(HighPart, 8);
3251 break;
3253 // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
3254 // is passed in the next available eightbyte chunk of the last used
3255 // vector register.
3256 //
3257 // SSEUP should always be preceded by SSE, just widen.
3258 case SSEUp:
3259 assert(Lo == SSE && "Unexpected SSEUp classification.");
3260 ResType = GetByteVectorType(RetTy);
3261 break;
3263 // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
3264 // returned together with the previous X87 value in %st0.
3265 case X87Up:
3266 // If X87Up is preceded by X87, we don't need to do
3267 // anything. However, in some cases with unions it may not be
3268 // preceded by X87. In such situations we follow gcc and pass the
3269 // extra bits in an SSE reg.
3270 if (Lo != X87) {
3271 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
3272 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
3273 return ABIArgInfo::getDirect(HighPart, 8);
3274 }
3275 break;
3276 }
3278 // If a high part was specified, merge it together with the low part. It is
3279 // known to pass in the high eightbyte of the result. We do this by forming a
3280 // first class struct aggregate with the high and low part: {low, high}
3281 if (HighPart)
3282 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
3284 return ABIArgInfo::getDirect(ResType);
3285 }
3287 ABIArgInfo X86_64ABIInfo::classifyArgumentType(
3288 QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE,
3289 bool isNamedArg) const {
3292 Ty = useFirstFieldIfTransparentUnion(Ty);
3294 X86_64ABIInfo::Class Lo, Hi;
3295 classify(Ty, 0, Lo, Hi, isNamedArg);
3297 // Check some invariants.
3298 // FIXME: Enforce these by construction.
3299 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
3300 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
3304 llvm::Type *ResType = nullptr;
3305 switch (Lo) {
3306 case NoClass:
3307 if (Hi == NoClass)
3308 return ABIArgInfo::getIgnore();
3309 // If the low part is just padding, it takes no register, leave ResType
3310 // null.
3311 assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
3312 "Unknown missing lo part");
3313 break;
3315 // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
3316 // on the stack.
3317 case Memory:
3319 // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
3320 // COMPLEX_X87, it is passed in memory.
3321 case X87:
3322 case ComplexX87:
3323 if (getRecordArgABI(Ty, getCXXABI()) == CGCXXABI::RAA_Indirect)
3324 ++neededInt;
3325 return getIndirectResult(Ty, freeIntRegs);
3327 case SSEUp:
3328 case X87Up:
3329 llvm_unreachable("Invalid classification for lo word.");
3331 // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
3332 // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
3333 // and %r9 is used.
3334 case Integer:
3335 ++neededInt;
3337 // Pick an 8-byte type based on the preferred type.
3338 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);
3340 // If we have a sign or zero extended integer, make sure to return Extend
3341 // so that the parameter gets the right LLVM IR attributes.
3342 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
3343 // Treat an enum type as its underlying type.
3344 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3345 Ty = EnumTy->getDecl()->getIntegerType();
3347 if (Ty->isIntegralOrEnumerationType() &&
3348 Ty->isPromotableIntegerType())
3349 return ABIArgInfo::getExtend();
3350 }
3351 break;
3354 // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
3355 // available SSE register is used, the registers are taken in the
3356 // order from %xmm0 to %xmm7.
3357 case SSE: {
3358 llvm::Type *IRType = CGT.ConvertType(Ty);
3359 ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
3360 ++neededSSE;
3361 break;
3362 }
3363 }
3365 llvm::Type *HighPart = nullptr;
3366 switch (Hi) {
3367 // Memory was handled previously, ComplexX87 and X87 should
3368 // never occur as hi classes, and X87Up must be preceded by X87,
3369 // which is passed in memory.
3370 case Memory:
3371 case X87:
3372 case ComplexX87:
3373 llvm_unreachable("Invalid classification for hi word.");
3375 case NoClass: break;
3377 case Integer:
3378 ++neededInt;
3379 // Pick an 8-byte type based on the preferred type.
3380 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
3382 if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
3383 return ABIArgInfo::getDirect(HighPart, 8);
3384 break;
3386 // X87Up generally doesn't occur here (long double is passed in
3387 // memory), except in situations involving unions.
3388 case X87Up:
3389 case SSE:
3390 HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
3392 if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
3393 return ABIArgInfo::getDirect(HighPart, 8);
3394 break;
3396 // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
3397 // eightbyte is passed in the upper half of the last used SSE
3398 // register. This only happens when 128-bit vectors are passed.
3401 case SSEUp:
3402 assert(Lo == SSE && "Unexpected SSEUp classification");
3403 ResType = GetByteVectorType(Ty);
3404 break;
3405 }
3407 // If a high part was specified, merge it together with the low part. It is
3408 // known to pass in the high eightbyte of the result. We do this by forming a
3409 // first class struct aggregate with the high and low part: {low, high}
3410 if (HighPart)
3411 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
3413 return ABIArgInfo::getDirect(ResType);
3414 }
3417 X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
3418 unsigned &NeededSSE) const {
3419 auto RT = Ty->getAs<RecordType>();
3420 assert(RT && "classifyRegCallStructType only valid with struct types");
3422 if (RT->getDecl()->hasFlexibleArrayMember())
3423 return getIndirectReturnResult(Ty);
3426 if (auto CXXRD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
3427 if (CXXRD->isDynamicClass()) {
3428 NeededInt = NeededSSE = 0;
3429 return getIndirectReturnResult(Ty);
3430 }
3432 for (const auto &I : CXXRD->bases())
3433 if (classifyRegCallStructTypeImpl(I.getType(), NeededInt, NeededSSE)
3434 .isIndirect()) {
3435 NeededInt = NeededSSE = 0;
3436 return getIndirectReturnResult(Ty);
3437 }
3438 }
3441 for (const auto *FD : RT->getDecl()->fields()) {
3442 if (FD->getType()->isRecordType() && !FD->getType()->isUnionType()) {
3443 if (classifyRegCallStructTypeImpl(FD->getType(), NeededInt, NeededSSE)
3444 .isIndirect()) {
3445 NeededInt = NeededSSE = 0;
3446 return getIndirectReturnResult(Ty);
3447 }
3448 } else {
3449 unsigned LocalNeededInt, LocalNeededSSE;
3450 if (classifyArgumentType(FD->getType(), UINT_MAX, LocalNeededInt,
3451 LocalNeededSSE, true)
3452 .isIndirect()) {
3453 NeededInt = NeededSSE = 0;
3454 return getIndirectReturnResult(Ty);
3455 }
3456 NeededInt += LocalNeededInt;
3457 NeededSSE += LocalNeededSSE;
3458 }
3459 }
3461 return ABIArgInfo::getDirect();
3462 }
3464 ABIArgInfo X86_64ABIInfo::classifyRegCallStructType(QualType Ty,
3465 unsigned &NeededInt,
3466 unsigned &NeededSSE) const {
3471 return classifyRegCallStructTypeImpl(Ty, NeededInt, NeededSSE);
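// Illustrative note (assumed example, not from the source): for regcall, a
// struct such as
//   struct P { int a, b; double d; };
// is walked field by field above, accumulating NeededInt == 2 and
// NeededSSE == 1; it stays direct only while both totals fit in the free
// registers of the caller.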
3474 void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
3476 bool IsRegCall = FI.getCallingConvention() == llvm::CallingConv::X86_RegCall;
3478 // Keep track of the number of assigned registers.
3479 unsigned FreeIntRegs = IsRegCall ? 11 : 6;
3480 unsigned FreeSSERegs = IsRegCall ? 16 : 8;
3481 unsigned NeededInt, NeededSSE;
3483 if (IsRegCall && FI.getReturnType()->getTypePtr()->isRecordType() &&
3484 !FI.getReturnType()->getTypePtr()->isUnionType()) {
3485 FI.getReturnInfo() =
3486 classifyRegCallStructType(FI.getReturnType(), NeededInt, NeededSSE);
3487 if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
3488 FreeIntRegs -= NeededInt;
3489 FreeSSERegs -= NeededSSE;
3491 FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
3493 } else if (!getCXXABI().classifyReturnType(FI))
3494 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
3496 // If the return value is indirect, then the hidden argument is consuming one
3497 // integer register.
3498 if (FI.getReturnInfo().isIndirect())
3501 // The chain argument effectively gives us another free register.
3502 if (FI.isChainCall())
3505 unsigned NumRequiredArgs = FI.getNumRequiredArgs();
3506 // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
3507 // get assigned (in left-to-right order) for passing as follows...
3509 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
3510 it != ie; ++it, ++ArgNo) {
3511 bool IsNamedArg = ArgNo < NumRequiredArgs;
3513 if (IsRegCall && it->type->isStructureOrClassType())
3514 it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE);
3516 it->info = classifyArgumentType(it->type, FreeIntRegs, NeededInt,
3517 NeededSSE, IsNamedArg);
3519 // AMD64-ABI 3.2.3p3: If there are no registers available for any
3520 // eightbyte of an argument, the whole argument is passed on the
3521 // stack. If registers have already been assigned for some
3522 // eightbytes of such an argument, the assignments get reverted.
3523 if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
3524 FreeIntRegs -= NeededInt;
3525 FreeSSERegs -= NeededSSE;
3527 it->info = getIndirectResult(it->type, FreeIntRegs);
3532 static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF,
3533 Address VAListAddr, QualType Ty) {
3534 Address overflow_arg_area_p = CGF.Builder.CreateStructGEP(
3535 VAListAddr, 2, CharUnits::fromQuantity(8), "overflow_arg_area_p");
3536 llvm::Value *overflow_arg_area =
3537 CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");
3539 // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upward to a 16-byte
3540 // boundary if the alignment needed by the type exceeds an 8-byte boundary.
3541 // It isn't stated explicitly in the standard, but in practice we use
3542 // alignment greater than 16 where necessary.
3543 CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
3544 if (Align > CharUnits::fromQuantity(8)) {
3545 overflow_arg_area = emitRoundPointerUpToAlignment(CGF, overflow_arg_area,
3549 // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
3550 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
3552 CGF.Builder.CreateBitCast(overflow_arg_area,
3553 llvm::PointerType::getUnqual(LTy));
3555 // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
3556 // l->overflow_arg_area + sizeof(type).
3557 // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
3558 // an 8 byte boundary.
3560 uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
3561 llvm::Value *Offset =
3562 llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
3563 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
3564 "overflow_arg_area.next");
3565 CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
3567 // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
3568 return Address(Res, Align);
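// Illustrative note (added): for a 12-byte struct, SizeInBytes is 12, so the
// (SizeInBytes + 7) & ~7 rounding above advances overflow_arg_area by 16
// bytes, keeping the next stack slot 8-byte aligned as Step 10 requires.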
3571 Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
3572 QualType Ty) const {
3573 // Assume that va_list type is correct; should be pointer to LLVM type:
3574 // struct {
3575 //   i32 gp_offset;
3576 //   i32 fp_offset;
3577 //   i8* overflow_arg_area;
3578 //   i8* reg_save_area;
3579 // };
3580 unsigned neededInt, neededSSE;
3582 Ty = getContext().getCanonicalType(Ty);
3583 ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE,
3584 /*isNamedArg*/false);
3586 // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
3587 // in the registers. If not go to step 7.
3588 if (!neededInt && !neededSSE)
3589 return EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);
3591 // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
3592 // general purpose registers needed to pass type and num_fp to hold
3593 // the number of floating point registers needed.
3595 // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
3596 // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
3597 // l->fp_offset > 304 - num_fp * 16 go to step 7.
3599 // NOTE: 304 is a typo; there are (6 * 8 + 8 * 16) = 176 bytes of
3600 // register save space.
3602 llvm::Value *InRegs = nullptr;
3603 Address gp_offset_p = Address::invalid(), fp_offset_p = Address::invalid();
3604 llvm::Value *gp_offset = nullptr, *fp_offset = nullptr;
3605 if (neededInt) {
3606 gp_offset_p =
3607 CGF.Builder.CreateStructGEP(VAListAddr, 0, CharUnits::Zero(),
3608 "gp_offset_p");
3609 gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
3610 InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
3611 InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
3612 }
3614 if (neededSSE) {
3615 fp_offset_p =
3616 CGF.Builder.CreateStructGEP(VAListAddr, 1, CharUnits::fromQuantity(4),
3617 "fp_offset_p");
3618 fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
3619 llvm::Value *FitsInFP =
3620 llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
3621 FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
3622 InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
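// Illustrative note (added): for an argument needing one GPR and one XMM
// register, the checks above reduce to gp_offset <= 40 and fp_offset <= 160,
// i.e. at least one of the six GPR slots and one of the eight XMM slots in
// the register save area is still unused.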
3625 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
3626 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
3627 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
3628 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
3630 // Emit code to load the value if it was passed in registers.
3632 CGF.EmitBlock(InRegBlock);
3634 // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
3635 // an offset of l->gp_offset and/or l->fp_offset. This may require
3636 // copying to a temporary location in case the parameter is passed
3637 // in different register classes or requires an alignment greater
3638 // than 8 for general purpose registers and 16 for XMM registers.
3640 // FIXME: This really results in shameful code when we end up needing to
3641 // collect arguments from different places; often what should result in a
3642 // simple assembling of a structure from scattered addresses has many more
3643 // loads than necessary. Can we clean this up?
3644 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
3645 llvm::Value *RegSaveArea = CGF.Builder.CreateLoad(
3646 CGF.Builder.CreateStructGEP(VAListAddr, 3, CharUnits::fromQuantity(16)),
3649 Address RegAddr = Address::invalid();
3650 if (neededInt && neededSSE) {
3652 assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
3653 llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
3654 Address Tmp = CGF.CreateMemTemp(Ty);
3655 Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST);
3656 assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
3657 llvm::Type *TyLo = ST->getElementType(0);
3658 llvm::Type *TyHi = ST->getElementType(1);
3659 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
3660 "Unexpected ABI info for mixed regs");
3661 llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
3662 llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
3663 llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegSaveArea, gp_offset);
3664 llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegSaveArea, fp_offset);
3665 llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
3666 llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
3668 // Copy the first element.
3669 // FIXME: Our choice of alignment here and below is probably pessimistic.
3670 llvm::Value *V = CGF.Builder.CreateAlignedLoad(
3671 TyLo, CGF.Builder.CreateBitCast(RegLoAddr, PTyLo),
3672 CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(TyLo)));
3673 CGF.Builder.CreateStore(V,
3674 CGF.Builder.CreateStructGEP(Tmp, 0, CharUnits::Zero()));
3676 // Copy the second element.
3677 V = CGF.Builder.CreateAlignedLoad(
3678 TyHi, CGF.Builder.CreateBitCast(RegHiAddr, PTyHi),
3679 CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(TyHi)));
3680 CharUnits Offset = CharUnits::fromQuantity(
3681 getDataLayout().getStructLayout(ST)->getElementOffset(1));
3682 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1, Offset));
3684 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
3685 } else if (neededInt) {
3686 RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, gp_offset),
3687 CharUnits::fromQuantity(8));
3688 RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);
3690 // Copy to a temporary if necessary to ensure the appropriate alignment.
3691 std::pair<CharUnits, CharUnits> SizeAlign =
3692 getContext().getTypeInfoInChars(Ty);
3693 uint64_t TySize = SizeAlign.first.getQuantity();
3694 CharUnits TyAlign = SizeAlign.second;
3696 // Copy into a temporary if the type is more aligned than the
3697 // register save area.
3698 if (TyAlign.getQuantity() > 8) {
3699 Address Tmp = CGF.CreateMemTemp(Ty);
3700 CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, false);
3701 RegAddr = Tmp;
3702 }
3704 } else if (neededSSE == 1) {
3705 RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset),
3706 CharUnits::fromQuantity(16));
3707 RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);
3709 assert(neededSSE == 2 && "Invalid number of needed registers!");
3710 // SSE registers are spaced 16 bytes apart in the register save
3711 // area, so we need to collect the two eightbytes together.
3712 // The ABI isn't explicit about this, but it seems reasonable
3713 // to assume that the slots are 16-byte aligned, since the stack is
3714 // naturally 16-byte aligned and the prologue is expected to store
3715 // all the SSE registers to the register save area.
3716 Address RegAddrLo = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset),
3717 CharUnits::fromQuantity(16));
3718 Address RegAddrHi =
3719 CGF.Builder.CreateConstInBoundsByteGEP(RegAddrLo,
3720 CharUnits::fromQuantity(16));
3721 llvm::Type *DoubleTy = CGF.DoubleTy;
3722 llvm::StructType *ST = llvm::StructType::get(DoubleTy, DoubleTy, nullptr);
3723 llvm::Value *V;
3724 Address Tmp = CGF.CreateMemTemp(Ty);
3725 Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST);
3726 V = CGF.Builder.CreateLoad(
3727 CGF.Builder.CreateElementBitCast(RegAddrLo, DoubleTy));
3728 CGF.Builder.CreateStore(V,
3729 CGF.Builder.CreateStructGEP(Tmp, 0, CharUnits::Zero()));
3730 V = CGF.Builder.CreateLoad(
3731 CGF.Builder.CreateElementBitCast(RegAddrHi, DoubleTy));
3732 CGF.Builder.CreateStore(V,
3733 CGF.Builder.CreateStructGEP(Tmp, 1, CharUnits::fromQuantity(8)));
3735 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
3738 // AMD64-ABI 3.5.7p5: Step 5. Set:
3739 // l->gp_offset = l->gp_offset + num_gp * 8
3740 // l->fp_offset = l->fp_offset + num_fp * 16.
3741 if (neededInt) {
3742 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
3743 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
3744 gp_offset_p);
3745 }
3746 if (neededSSE) {
3747 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
3748 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
3749 fp_offset_p);
3750 }
3751 CGF.EmitBranch(ContBlock);
3753 // Emit code to load the value if it was passed in memory.
3755 CGF.EmitBlock(InMemBlock);
3756 Address MemAddr = EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);
3758 // Return the appropriate result.
3760 CGF.EmitBlock(ContBlock);
3761 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, MemAddr, InMemBlock,
3766 Address X86_64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
3767 QualType Ty) const {
3768 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
3769 CGF.getContext().getTypeInfoInChars(Ty),
3770 CharUnits::fromQuantity(8),
3771 /*allowHigherAlign*/ false);
3775 WinX86_64ABIInfo::reclassifyHvaArgType(QualType Ty, unsigned &FreeSSERegs,
3776 const ABIArgInfo &current) const {
3777 // Assumes vectorCall calling convention.
3778 const Type *Base = nullptr;
3779 uint64_t NumElts = 0;
3781 if (!Ty->isBuiltinType() && !Ty->isVectorType() &&
3782 isHomogeneousAggregate(Ty, Base, NumElts) && FreeSSERegs >= NumElts) {
3783 FreeSSERegs -= NumElts;
3784 return getDirectX86Hva();
3789 ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
3790 bool IsReturnType, bool IsVectorCall,
3791 bool IsRegCall) const {
3793 if (Ty->isVoidType())
3794 return ABIArgInfo::getIgnore();
3796 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3797 Ty = EnumTy->getDecl()->getIntegerType();
3799 TypeInfo Info = getContext().getTypeInfo(Ty);
3800 uint64_t Width = Info.Width;
3801 CharUnits Align = getContext().toCharUnitsFromBits(Info.Align);
3803 const RecordType *RT = Ty->getAs<RecordType>();
3805 if (!IsReturnType) {
3806 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()))
3807 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
3810 if (RT->getDecl()->hasFlexibleArrayMember())
3811 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
3815 const Type *Base = nullptr;
3816 uint64_t NumElts = 0;
3817 // vectorcall adds the concept of a homogeneous vector aggregate, similar to
3818 // other targets.
3819 if ((IsVectorCall || IsRegCall) &&
3820 isHomogeneousAggregate(Ty, Base, NumElts)) {
3822 if (FreeSSERegs >= NumElts) {
3823 FreeSSERegs -= NumElts;
3824 if (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())
3825 return ABIArgInfo::getDirect();
3826 return ABIArgInfo::getExpand();
3828 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
3829 } else if (IsVectorCall) {
3830 if (FreeSSERegs >= NumElts &&
3831 (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())) {
3832 FreeSSERegs -= NumElts;
3833 return ABIArgInfo::getDirect();
3834 } else if (IsReturnType) {
3835 return ABIArgInfo::getExpand();
3836 } else if (!Ty->isBuiltinType() && !Ty->isVectorType()) {
3837 // HVAs are delayed and reclassified in the 2nd step.
3838 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
3843 if (Ty->isMemberPointerType()) {
3844 // If the member pointer is represented by an LLVM int or ptr, pass it
3846 llvm::Type *LLTy = CGT.ConvertType(Ty);
3847 if (LLTy->isPointerTy() || LLTy->isIntegerTy())
3848 return ABIArgInfo::getDirect();
3851 if (RT || Ty->isAnyComplexType() || Ty->isMemberPointerType()) {
3852 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
3853 // not 1, 2, 4, or 8 bytes, must be passed by reference."
3854 if (Width > 64 || !llvm::isPowerOf2_64(Width))
3855 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
3857 // Otherwise, coerce it to a small integer.
3858 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width));
3861 // Bool type is always extended to the ABI; other builtin types are not
3862 // extended.
3863 const BuiltinType *BT = Ty->getAs<BuiltinType>();
3864 if (BT && BT->getKind() == BuiltinType::Bool)
3865 return ABIArgInfo::getExtend();
3867 // Mingw64 GCC uses the old 80-bit extended precision floating-point unit;
3868 // it passes long doubles indirectly, through memory.
3869 if (IsMingw64 && BT && BT->getKind() == BuiltinType::LongDouble) {
3870 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
3871 if (LDF == &llvm::APFloat::x87DoubleExtended())
3872 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
3875 return ABIArgInfo::getDirect();
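// Illustrative note (added): under the MS rule quoted above, a 12-byte
// struct (not 1, 2, 4, or 8 bytes) is passed indirectly, while an 8-byte
// struct is coerced to a plain i64 and passed in a single register or slot.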
3878 void WinX86_64ABIInfo::computeVectorCallArgs(CGFunctionInfo &FI,
3879 unsigned FreeSSERegs,
3881 bool IsRegCall) const {
3883 for (auto &I : FI.arguments()) {
3884 if (Count < VectorcallMaxParamNumAsReg)
3885 I.info = classify(I.type, FreeSSERegs, false, IsVectorCall, IsRegCall);
3886 else {
3887 // Since these cannot be passed in registers, pretend no registers
3888 // are available.
3889 unsigned ZeroSSERegsAvail = 0;
3890 I.info = classify(I.type, /*FreeSSERegs=*/ZeroSSERegsAvail, false,
3891 IsVectorCall, IsRegCall);
3897 for (auto &I : FI.arguments()) {
3898 if (Count < VectorcallMaxParamNumAsReg)
3899 I.info = reclassifyHvaArgType(I.type, FreeSSERegs, I.info);
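// Illustrative note (assumed example): the two passes matter for an HVA
// parameter such as
//   struct V4 { __m128 v[4]; };
// The first pass leaves it indirect so scalar arguments can claim XMM
// registers first; the second pass upgrades it to a direct HVA only if
// enough of the six vectorcall SSE register slots remain.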
3904 void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
3906 FI.getCallingConvention() == llvm::CallingConv::X86_VectorCall;
3907 bool IsRegCall = FI.getCallingConvention() == llvm::CallingConv::X86_RegCall;
3909 unsigned FreeSSERegs = 0;
3910 if (IsVectorCall) {
3911 // We can use up to 4 SSE return registers with vectorcall.
3912 FreeSSERegs = 4;
3913 } else if (IsRegCall) {
3914 // RegCall gives us 16 SSE registers.
3915 FreeSSERegs = 16;
3916 }
3918 if (!getCXXABI().classifyReturnType(FI))
3919 FI.getReturnInfo() = classify(FI.getReturnType(), FreeSSERegs, true,
3920 IsVectorCall, IsRegCall);
3922 if (IsVectorCall) {
3923 // We can use up to 6 SSE register parameters with vectorcall.
3924 FreeSSERegs = 6;
3925 } else if (IsRegCall) {
3926 // RegCall gives us 16 SSE registers; we can reuse the return registers.
3927 FreeSSERegs = 16;
3928 }
3930 if (IsVectorCall) {
3931 computeVectorCallArgs(FI, FreeSSERegs, IsVectorCall, IsRegCall);
3932 } else {
3933 for (auto &I : FI.arguments())
3934 I.info = classify(I.type, FreeSSERegs, false, IsVectorCall, IsRegCall);
3935 }
3939 Address WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
3940 QualType Ty) const {
3942 bool IsIndirect = false;
3944 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
3945 // not 1, 2, 4, or 8 bytes, must be passed by reference."
3946 if (isAggregateTypeForABI(Ty) || Ty->isMemberPointerType()) {
3947 uint64_t Width = getContext().getTypeSize(Ty);
3948 IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
3951 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
3952 CGF.getContext().getTypeInfoInChars(Ty),
3953 CharUnits::fromQuantity(8),
3954 /*allowHigherAlign*/ false);
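// Illustrative note (added): a 3-byte struct fails the power-of-2 test above
// (Width == 24), so va_arg loads a pointer to the caller's copy instead of
// the value itself; an 8-byte struct is read directly from the slot.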
3959 /// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information.
3960 class PPC32_SVR4_ABIInfo : public DefaultABIInfo {
3961 bool IsSoftFloatABI;
3963 PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, bool SoftFloatABI)
3964 : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI) {}
3966 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
3967 QualType Ty) const override;
3970 class PPC32TargetCodeGenInfo : public TargetCodeGenInfo {
3972 PPC32TargetCodeGenInfo(CodeGenTypes &CGT, bool SoftFloatABI)
3973 : TargetCodeGenInfo(new PPC32_SVR4_ABIInfo(CGT, SoftFloatABI)) {}
3975 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
3976 // This is recovered from gcc output.
3977 return 1; // r1 is the dedicated stack pointer
3980 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3981 llvm::Value *Address) const override;
3986 // TODO: this implementation is now likely redundant with
3987 // DefaultABIInfo::EmitVAArg.
3988 Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
3989 QualType Ty) const {
3990 const unsigned OverflowLimit = 8;
3991 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
3992 // TODO: Implement this. For now ignore.
3994 return Address::invalid(); // FIXME?
3997 // struct __va_list_tag {
3998 // unsigned char gpr;
3999 // unsigned char fpr;
4000 // unsigned short reserved;
4001 // void *overflow_arg_area;
4002 // void *reg_save_area;
4005 bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64;
4007 Ty->isIntegerType() || Ty->isPointerType() || Ty->isAggregateType();
4008 bool isF64 = Ty->isFloatingType() && getContext().getTypeSize(Ty) == 64;
4010 // All aggregates are passed indirectly? That doesn't seem consistent
4011 // with the argument-lowering code.
4012 bool isIndirect = Ty->isAggregateType();
4014 CGBuilderTy &Builder = CGF.Builder;
4016 // The calling convention either uses 1-2 GPRs or 1 FPR.
4017 Address NumRegsAddr = Address::invalid();
4018 if (isInt || IsSoftFloatABI) {
4019 NumRegsAddr = Builder.CreateStructGEP(VAList, 0, CharUnits::Zero(), "gpr");
4021 NumRegsAddr = Builder.CreateStructGEP(VAList, 1, CharUnits::One(), "fpr");
4024 llvm::Value *NumRegs = Builder.CreateLoad(NumRegsAddr, "numUsedRegs");
4026 // "Align" the register count when TY is i64.
4027 if (isI64 || (isF64 && IsSoftFloatABI)) {
4028 NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(1));
4029 NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t) ~1U));
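// Illustrative note (added): the add-and-mask above rounds the used-register
// count up to an even number, e.g. 3 -> 4, so a 64-bit value starts in an
// aligned GPR pair and never straddles an odd register boundary.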
4030 }
4032 llvm::Value *CC =
4033 Builder.CreateICmpULT(NumRegs, Builder.getInt8(OverflowLimit), "cond");
4035 llvm::BasicBlock *UsingRegs = CGF.createBasicBlock("using_regs");
4036 llvm::BasicBlock *UsingOverflow = CGF.createBasicBlock("using_overflow");
4037 llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
4039 Builder.CreateCondBr(CC, UsingRegs, UsingOverflow);
4041 llvm::Type *DirectTy = CGF.ConvertType(Ty);
4042 if (isIndirect) DirectTy = DirectTy->getPointerTo(0);
4044 // Case 1: consume registers.
4045 Address RegAddr = Address::invalid();
4047 CGF.EmitBlock(UsingRegs);
4049 Address RegSaveAreaPtr =
4050 Builder.CreateStructGEP(VAList, 4, CharUnits::fromQuantity(8));
4051 RegAddr = Address(Builder.CreateLoad(RegSaveAreaPtr),
4052 CharUnits::fromQuantity(8));
4053 assert(RegAddr.getElementType() == CGF.Int8Ty);
4055 // Floating-point registers start after the general-purpose registers.
4056 if (!(isInt || IsSoftFloatABI)) {
4057 RegAddr = Builder.CreateConstInBoundsByteGEP(RegAddr,
4058 CharUnits::fromQuantity(32));
4061 // Get the address of the saved value by scaling the number of
4062 // registers we've used by the size of each register.
4063 CharUnits RegSize = CharUnits::fromQuantity((isInt || IsSoftFloatABI) ? 4 : 8);
4064 llvm::Value *RegOffset =
4065 Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.getQuantity()));
4066 RegAddr = Address(Builder.CreateInBoundsGEP(CGF.Int8Ty,
4067 RegAddr.getPointer(), RegOffset),
4068 RegAddr.getAlignment().alignmentOfArrayElement(RegSize));
4069 RegAddr = Builder.CreateElementBitCast(RegAddr, DirectTy);
4071 // Increase the used-register count.
4073 Builder.CreateAdd(NumRegs,
4074 Builder.getInt8((isI64 || (isF64 && IsSoftFloatABI)) ? 2 : 1));
4075 Builder.CreateStore(NumRegs, NumRegsAddr);
4077 CGF.EmitBranch(Cont);
4080 // Case 2: consume space in the overflow area.
4081 Address MemAddr = Address::invalid();
4083 CGF.EmitBlock(UsingOverflow);
4085 Builder.CreateStore(Builder.getInt8(OverflowLimit), NumRegsAddr);
4087 // Everything in the overflow area is rounded up to a size of at least 4.
4088 CharUnits OverflowAreaAlign = CharUnits::fromQuantity(4);
4090 CharUnits Size;
4091 if (!isIndirect) {
4092 auto TypeInfo = CGF.getContext().getTypeInfoInChars(Ty);
4093 Size = TypeInfo.first.alignTo(OverflowAreaAlign);
4094 } else {
4095 Size = CGF.getPointerSize();
4096 }
4098 Address OverflowAreaAddr =
4099 Builder.CreateStructGEP(VAList, 3, CharUnits::fromQuantity(4));
4100 Address OverflowArea(Builder.CreateLoad(OverflowAreaAddr, "argp.cur"),
4101 OverflowAreaAlign);
4102 // Round up the address of the argument to its required alignment.
4103 CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
4104 if (Align > OverflowAreaAlign) {
4105 llvm::Value *Ptr = OverflowArea.getPointer();
4106 OverflowArea = Address(emitRoundPointerUpToAlignment(CGF, Ptr, Align),
4110 MemAddr = Builder.CreateElementBitCast(OverflowArea, DirectTy);
4112 // Increase the overflow area.
4113 OverflowArea = Builder.CreateConstInBoundsByteGEP(OverflowArea, Size);
4114 Builder.CreateStore(OverflowArea.getPointer(), OverflowAreaAddr);
4115 CGF.EmitBranch(Cont);
4118 CGF.EmitBlock(Cont);
4120 // Merge the cases with a phi.
4121 Address Result = emitMergePHI(CGF, RegAddr, UsingRegs, MemAddr, UsingOverflow,
4124 // Load the pointer if the argument was passed indirectly.
4126 Result = Address(Builder.CreateLoad(Result, "aggr"),
4127 getContext().getTypeAlignInChars(Ty));
4134 PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4135 llvm::Value *Address) const {
4136 // This is calculated from the LLVM and GCC tables and verified
4137 // against gcc output. AFAIK all ABIs use the same encoding.
4139 CodeGen::CGBuilderTy &Builder = CGF.Builder;
4141 llvm::IntegerType *i8 = CGF.Int8Ty;
4142 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
4143 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
4144 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
4146 // 0-31: r0-31, the 4-byte general-purpose registers
4147 AssignToArrayRange(Builder, Address, Four8, 0, 31);
4149 // 32-63: fp0-31, the 8-byte floating-point registers
4150 AssignToArrayRange(Builder, Address, Eight8, 32, 63);
4152 // 64-76 are various 4-byte special-purpose registers:
4159 AssignToArrayRange(Builder, Address, Four8, 64, 76);
4161 // 77-108: v0-31, the 16-byte vector registers
4162 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
4169 AssignToArrayRange(Builder, Address, Four8, 109, 113);
4177 /// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
4178 class PPC64_SVR4_ABIInfo : public ABIInfo {
4186 static const unsigned GPRBits = 64;
4189 bool IsSoftFloatABI;
4191 // A vector of float or double will be promoted to <4 x float> or
4192 // <4 x double> and will be passed in a QPX register.
4193 bool IsQPXVectorTy(const Type *Ty) const {
4194 if (!HasQPX)
4195 return false;
4197 if (const VectorType *VT = Ty->getAs<VectorType>()) {
4198 unsigned NumElements = VT->getNumElements();
4199 if (NumElements == 1)
4200 return false;
4202 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) {
4203 if (getContext().getTypeSize(Ty) <= 256)
4204 return true;
4205 } else if (VT->getElementType()->
4206 isSpecificBuiltinType(BuiltinType::Float)) {
4207 if (getContext().getTypeSize(Ty) <= 128)
4208 return true;
4209 }
4210 }
4212 return false;
4213 }
4215 bool IsQPXVectorTy(QualType Ty) const {
4216 return IsQPXVectorTy(Ty.getTypePtr());
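// Illustrative note (added): with QPX enabled, <4 x double> (256 bits) and
// <4 x float> (128 bits) both satisfy the checks above, while a
// single-element vector such as <1 x double> never does.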
4220 PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind, bool HasQPX,
4222 : ABIInfo(CGT), Kind(Kind), HasQPX(HasQPX),
4223 IsSoftFloatABI(SoftFloatABI) {}
4225 bool isPromotableTypeForABI(QualType Ty) const;
4226 CharUnits getParamTypeAlignment(QualType Ty) const;
4228 ABIArgInfo classifyReturnType(QualType RetTy) const;
4229 ABIArgInfo classifyArgumentType(QualType Ty) const;
4231 bool isHomogeneousAggregateBaseType(QualType Ty) const override;
4232 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
4233 uint64_t Members) const override;
4235 // TODO: We can add more logic to computeInfo to improve performance.
4236 // Example: For aggregate arguments that fit in a register, we could
4237 // use getDirectInReg (as is done below for structs containing a single
4238 // floating-point value) to avoid pushing them to memory on function
4239 // entry. This would require changing the logic in PPCISelLowering
4240 // when lowering the parameters in the caller and args in the callee.
4241 void computeInfo(CGFunctionInfo &FI) const override {
4242 if (!getCXXABI().classifyReturnType(FI))
4243 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
4244 for (auto &I : FI.arguments()) {
4245 // We rely on the default argument classification for the most part.
4246 // One exception: An aggregate containing a single floating-point
4247 // or vector item must be passed in a register if one is available.
4248 const Type *T = isSingleElementStruct(I.type, getContext());
4249 if (T) {
4250 const BuiltinType *BT = T->getAs<BuiltinType>();
4251 if (IsQPXVectorTy(T) ||
4252 (T->isVectorType() && getContext().getTypeSize(T) == 128) ||
4253 (BT && BT->isFloatingPoint())) {
4254 QualType QT(T, 0);
4255 I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
4256 continue;
4257 }
4258 }
4259 I.info = classifyArgumentType(I.type);
4263 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4264 QualType Ty) const override;
4267 class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {
4270 PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT,
4271 PPC64_SVR4_ABIInfo::ABIKind Kind, bool HasQPX,
4273 : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT, Kind, HasQPX,
4276 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
4277 // This is recovered from gcc output.
4278 return 1; // r1 is the dedicated stack pointer
4281 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4282 llvm::Value *Address) const override;
4285 class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
4287 PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
4289 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
4290 // This is recovered from gcc output.
4291 return 1; // r1 is the dedicated stack pointer
4294 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4295 llvm::Value *Address) const override;
4300 // Return true if the ABI requires Ty to be passed sign- or zero-
4301 // extended to 64 bits.
4303 PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
4304 // Treat an enum type as its underlying type.
4305 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
4306 Ty = EnumTy->getDecl()->getIntegerType();
4308 // Promotable integer types are required to be promoted by the ABI.
4309 if (Ty->isPromotableIntegerType())
4312 // In addition to the usual promotable integer types, we also need to
4313 // extend all 32-bit types, since the ABI requires promotion to 64 bits.
4314 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
4315 switch (BT->getKind()) {
4316 case BuiltinType::Int:
4317 case BuiltinType::UInt:
4318 return true;
4319 default:
4320 break;
4321 }
4323 return false;
4324 }
4326 /// getParamTypeAlignment - Determine whether a type requires 16-byte or
4327 /// higher alignment in the parameter area. Always returns at least 8.
4328 CharUnits PPC64_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
4329 // Complex types are passed just like their elements.
4330 if (const ComplexType *CTy = Ty->getAs<ComplexType>())
4331 Ty = CTy->getElementType();
4333 // Only vector types of size 16 bytes need alignment (larger types are
4334 // passed via reference, smaller types are not aligned).
4335 if (IsQPXVectorTy(Ty)) {
4336 if (getContext().getTypeSize(Ty) > 128)
4337 return CharUnits::fromQuantity(32);
4339 return CharUnits::fromQuantity(16);
4340 } else if (Ty->isVectorType()) {
4341 return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16 : 8);
4344 // For single-element float/vector structs, we consider the whole type
4345 // to have the same alignment requirements as its single element.
4346 const Type *AlignAsType = nullptr;
4347 const Type *EltType = isSingleElementStruct(Ty, getContext());
4349 const BuiltinType *BT = EltType->getAs<BuiltinType>();
4350 if (IsQPXVectorTy(EltType) || (EltType->isVectorType() &&
4351 getContext().getTypeSize(EltType) == 128) ||
4352 (BT && BT->isFloatingPoint()))
4353 AlignAsType = EltType;
4356 // Likewise for ELFv2 homogeneous aggregates.
4357 const Type *Base = nullptr;
4358 uint64_t Members = 0;
4359 if (!AlignAsType && Kind == ELFv2 &&
4360 isAggregateTypeForABI(Ty) && isHomogeneousAggregate(Ty, Base, Members))
4363 // With special case aggregates, only vector base types need alignment.
4364 if (AlignAsType && IsQPXVectorTy(AlignAsType)) {
4365 if (getContext().getTypeSize(AlignAsType) > 128)
4366 return CharUnits::fromQuantity(32);
4368 return CharUnits::fromQuantity(16);
4369 } else if (AlignAsType) {
4370 return CharUnits::fromQuantity(AlignAsType->isVectorType() ? 16 : 8);
4373 // Otherwise, we only need alignment for any aggregate type that
4374 // has an alignment requirement of >= 16 bytes.
4375 if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128) {
4376 if (HasQPX && getContext().getTypeAlign(Ty) >= 256)
4377 return CharUnits::fromQuantity(32);
4378 return CharUnits::fromQuantity(16);
4381 return CharUnits::fromQuantity(8);
4384 /// isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous
4385 /// aggregate. Base is set to the base element type, and Members is set
4386 /// to the number of base elements.
4387 bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base,
4388 uint64_t &Members) const {
4389 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
4390 uint64_t NElements = AT->getSize().getZExtValue();
4391 if (NElements == 0)
4392 return false;
4393 if (!isHomogeneousAggregate(AT->getElementType(), Base, Members))
4394 return false;
4395 Members *= NElements;
4396 } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
4397 const RecordDecl *RD = RT->getDecl();
4398 if (RD->hasFlexibleArrayMember())
4399 return false;
4401 Members = 0;
4403 // If this is a C++ record, check the bases first.
4404 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
4405 for (const auto &I : CXXRD->bases()) {
4406 // Ignore empty records.
4407 if (isEmptyRecord(getContext(), I.getType(), true))
4410 uint64_t FldMembers;
4411 if (!isHomogeneousAggregate(I.getType(), Base, FldMembers))
4412 return false;
4414 Members += FldMembers;
4418 for (const auto *FD : RD->fields()) {
4419 // Ignore (non-zero arrays of) empty records.
4420 QualType FT = FD->getType();
4421 while (const ConstantArrayType *AT =
4422 getContext().getAsConstantArrayType(FT)) {
4423 if (AT->getSize().getZExtValue() == 0)
4424 return false;
4425 FT = AT->getElementType();
4426 }
4427 if (isEmptyRecord(getContext(), FT, true))
4428 continue;
4430 // For compatibility with GCC, ignore empty bitfields in C++ mode.
4431 if (getContext().getLangOpts().CPlusPlus &&
4432 FD->isBitField() && FD->getBitWidthValue(getContext()) == 0)
4435 uint64_t FldMembers;
4436 if (!isHomogeneousAggregate(FD->getType(), Base, FldMembers))
4437 return false;
4439 Members = (RD->isUnion() ?
4440 std::max(Members, FldMembers) : Members + FldMembers);
4441 }
4443 if (!Base)
4444 return false;
4446 // Ensure there is no padding.
4447 if (getContext().getTypeSize(Base) * Members !=
4448 getContext().getTypeSize(Ty))
4449 return false;
4450 } else {
4451 Members = 1;
4452 if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
4453 Members = 2;
4454 Ty = CT->getElementType();
4455 }
4456 }
4457 // Most ABIs only support float, double, and some vector type widths.
4458 if (!isHomogeneousAggregateBaseType(Ty))
4461 // The base type must be the same for all members. Types that
4462 // agree in both total size and mode (float vs. vector) are
4463 // treated as being equivalent here.
4464 const Type *TyPtr = Ty.getTypePtr();
4465 if (!Base)
4466 Base = TyPtr;
4467 // If the member is a non-power-of-2 vector, its reported size is already
4468 // rounded up to a power of 2, so widen the base type explicitly to match.
4469 if (const VectorType *VT = Base->getAs<VectorType>()) {
4470 QualType EltTy = VT->getElementType();
4471 unsigned NumElements =
4472 getContext().getTypeSize(VT) / getContext().getTypeSize(EltTy);
4473 Base = getContext()
4474 .getVectorType(EltTy, NumElements, VT->getVectorKind())
4475 .getTypePtr();
4476 }
4477 }
4479 if (Base->isVectorType() != TyPtr->isVectorType() ||
4480 getContext().getTypeSize(Base) != getContext().getTypeSize(TyPtr))
4483 return Members > 0 && isHomogeneousAggregateSmallEnough(Base, Members);
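// Illustrative note (added): a record such as
//   struct RGBA { float r, g, b, a; };
// yields Base == float and Members == 4 with no padding, so it is a
// homogeneous aggregate; adding an int member or trailing padding breaks it.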
4486 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
4487 // Homogeneous aggregates for ELFv2 must have base types of float,
4488 // double, long double, or 128-bit vectors.
4489 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
4490 if (BT->getKind() == BuiltinType::Float ||
4491 BT->getKind() == BuiltinType::Double ||
4492 BT->getKind() == BuiltinType::LongDouble) {
4493 if (IsSoftFloatABI)
4494 return false;
4495 return true;
4496 }
4497 }
4498 if (const VectorType *VT = Ty->getAs<VectorType>()) {
4499 if (getContext().getTypeSize(VT) == 128 || IsQPXVectorTy(Ty))
4500 return true;
4501 }
4502 return false;
4503 }
4505 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough(
4506 const Type *Base, uint64_t Members) const {
4507 // Vector types require one register, floating point types require one
4508 // or two registers depending on their size.
4509 uint32_t NumRegs =
4510 Base->isVectorType() ? 1 : (getContext().getTypeSize(Base) + 63) / 64;
4512 // Homogeneous Aggregates may occupy at most 8 registers.
4513 return Members * NumRegs <= 8;
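// Illustrative note (added): each vector member costs one register and each
// 128-bit long double costs two, so eight floats pass the check above
// (8 * 1 <= 8) while five IBM long doubles do not (5 * 2 > 8).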
4517 PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
4518 Ty = useFirstFieldIfTransparentUnion(Ty);
4520 if (Ty->isAnyComplexType())
4521 return ABIArgInfo::getDirect();
4523 // Non-Altivec vector types are passed in GPRs (smaller than 16 bytes)
4524 // or via reference (larger than 16 bytes).
4525 if (Ty->isVectorType() && !IsQPXVectorTy(Ty)) {
4526 uint64_t Size = getContext().getTypeSize(Ty);
4527 if (Size > 128)
4528 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
4529 else if (Size < 128) {
4530 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
4531 return ABIArgInfo::getDirect(CoerceTy);
4532 }
4533 }
4535 if (isAggregateTypeForABI(Ty)) {
4536 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
4537 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
4539 uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity();
4540 uint64_t TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity();
4542 // ELFv2 homogeneous aggregates are passed as array types.
4543 const Type *Base = nullptr;
4544 uint64_t Members = 0;
4545 if (Kind == ELFv2 &&
4546 isHomogeneousAggregate(Ty, Base, Members)) {
4547 llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
4548 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
4549 return ABIArgInfo::getDirect(CoerceTy);
4552 // If an aggregate may end up fully in registers, we do not
4553 // use the ByVal method, but pass the aggregate as array.
4554 // This is usually beneficial since we avoid forcing the
4555 // back-end to store the argument to memory.
4556 uint64_t Bits = getContext().getTypeSize(Ty);
4557 if (Bits > 0 && Bits <= 8 * GPRBits) {
4558 llvm::Type *CoerceTy;
4560 // Types up to 8 bytes are passed as integer type (which will be
4561 // properly aligned in the argument save area doubleword).
4562 if (Bits <= GPRBits)
4564 llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
4565 // Larger types are passed as arrays, with the base type selected
4566 // according to the required alignment in the save area.
4568 uint64_t RegBits = ABIAlign * 8;
4569 uint64_t NumRegs = llvm::alignTo(Bits, RegBits) / RegBits;
4570 llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits);
4571 CoerceTy = llvm::ArrayType::get(RegTy, NumRegs);
4574 return ABIArgInfo::getDirect(CoerceTy);
4577 // All other aggregates are passed ByVal.
4578 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
4580 /*Realign=*/TyAlign > ABIAlign);
4583 return (isPromotableTypeForABI(Ty) ?
4584 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
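// Illustrative note (added): a 24-byte struct with 8-byte alignment is
// coerced to [3 x i64] by the code above, so it travels in three GPRs (or
// their doubleword stack slots) instead of being passed ByVal.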
4588 PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
4589 if (RetTy->isVoidType())
4590 return ABIArgInfo::getIgnore();
4592 if (RetTy->isAnyComplexType())
4593 return ABIArgInfo::getDirect();
4595 // Non-Altivec vector types are returned in GPRs (smaller than 16 bytes)
4596 // or via reference (larger than 16 bytes).
4597 if (RetTy->isVectorType() && !IsQPXVectorTy(RetTy)) {
4598 uint64_t Size = getContext().getTypeSize(RetTy);
4599 if (Size > 128)
4600 return getNaturalAlignIndirect(RetTy);
4601 else if (Size < 128) {
4602 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
4603 return ABIArgInfo::getDirect(CoerceTy);
4604 }
4605 }
4607 if (isAggregateTypeForABI(RetTy)) {
4608 // ELFv2 homogeneous aggregates are returned as array types.
4609 const Type *Base = nullptr;
4610 uint64_t Members = 0;
4611 if (Kind == ELFv2 &&
4612 isHomogeneousAggregate(RetTy, Base, Members)) {
4613 llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
4614 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
4615 return ABIArgInfo::getDirect(CoerceTy);
4618 // ELFv2 small aggregates are returned in up to two registers.
4619 uint64_t Bits = getContext().getTypeSize(RetTy);
4620 if (Kind == ELFv2 && Bits <= 2 * GPRBits) {
4621 if (Bits == 0)
4622 return ABIArgInfo::getIgnore();
4624 llvm::Type *CoerceTy;
4625 if (Bits > GPRBits) {
4626 CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits);
4627 CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy, nullptr);
4628 } else
4629 CoerceTy =
4630 llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
4631 return ABIArgInfo::getDirect(CoerceTy);
4634 // All other aggregates are returned indirectly.
4635 return getNaturalAlignIndirect(RetTy);
4638 return (isPromotableTypeForABI(RetTy) ?
4639 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
4642 // Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine.
4643 Address PPC64_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4644 QualType Ty) const {
4645 auto TypeInfo = getContext().getTypeInfoInChars(Ty);
4646 TypeInfo.second = getParamTypeAlignment(Ty);
4648 CharUnits SlotSize = CharUnits::fromQuantity(8);
4650 // If we have a complex type and the base type is smaller than 8 bytes,
4651 // the ABI calls for the real and imaginary parts to be right-adjusted
4652 // in separate doublewords. However, Clang expects us to produce a
4653 // pointer to a structure with the two parts packed tightly. So generate
4654 // loads of the real and imaginary parts relative to the va_list pointer,
4655 // and store them to a temporary structure.
4656 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
4657 CharUnits EltSize = TypeInfo.first / 2;
4658 if (EltSize < SlotSize) {
4659 Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, CGF.Int8Ty,
4660 SlotSize * 2, SlotSize,
4661 SlotSize, /*AllowHigher*/ true);
4663 Address RealAddr = Addr;
4664 Address ImagAddr = RealAddr;
4665 if (CGF.CGM.getDataLayout().isBigEndian()) {
4666 RealAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr,
4667 SlotSize - EltSize);
4668 ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(ImagAddr,
4669 2 * SlotSize - EltSize);
4670 } else {
4671 ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, SlotSize);
4672 }
4674 llvm::Type *EltTy = CGF.ConvertTypeForMem(CTy->getElementType());
4675 RealAddr = CGF.Builder.CreateElementBitCast(RealAddr, EltTy);
4676 ImagAddr = CGF.Builder.CreateElementBitCast(ImagAddr, EltTy);
4677 llvm::Value *Real = CGF.Builder.CreateLoad(RealAddr, ".vareal");
4678 llvm::Value *Imag = CGF.Builder.CreateLoad(ImagAddr, ".vaimag");
4680 Address Temp = CGF.CreateMemTemp(Ty, "vacplx");
4681 CGF.EmitStoreOfComplex({Real, Imag}, CGF.MakeAddrLValue(Temp, Ty),
4682 /*init*/ true);
4684 return Temp;
4685 }
4687 // Otherwise, just use the general rule.
4688 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
4689 TypeInfo, SlotSize, /*AllowHigher*/ true);
4693 PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4694 llvm::Value *Address) {
4695 // This is calculated from the LLVM and GCC tables and verified
4696 // against gcc output. AFAIK all ABIs use the same encoding.
4698 CodeGen::CGBuilderTy &Builder = CGF.Builder;
4700 llvm::IntegerType *i8 = CGF.Int8Ty;
4701 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
4702 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
4703 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
4705 // 0-31: r0-31, the 8-byte general-purpose registers
4706 AssignToArrayRange(Builder, Address, Eight8, 0, 31);
4708 // 32-63: fp0-31, the 8-byte floating-point registers
4709 AssignToArrayRange(Builder, Address, Eight8, 32, 63);
4711 // 64-67 are various 8-byte special-purpose registers:
4716 AssignToArrayRange(Builder, Address, Eight8, 64, 67);
4718 // 68-76 are various 4-byte special-purpose registers:
4721 AssignToArrayRange(Builder, Address, Four8, 68, 76);
4723 // 77-108: v0-31, the 16-byte vector registers
4724 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
4734 AssignToArrayRange(Builder, Address, Eight8, 109, 116);
4740 PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
4741 CodeGen::CodeGenFunction &CGF,
4742 llvm::Value *Address) const {
4744 return PPC64_initDwarfEHRegSizeTable(CGF, Address);
4748 PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4749 llvm::Value *Address) const {
4751 return PPC64_initDwarfEHRegSizeTable(CGF, Address);
4754 //===----------------------------------------------------------------------===//
4755 // AArch64 ABI Implementation
4756 //===----------------------------------------------------------------------===//
4760 class AArch64ABIInfo : public SwiftABIInfo {
4761 public:
4762 enum ABIKind {
4763 AAPCS = 0,
4764 DarwinPCS
4765 };
4767 private:
4768 ABIKind Kind;
4770 public:
4771 AArch64ABIInfo(CodeGenTypes &CGT, ABIKind Kind)
4772 : SwiftABIInfo(CGT), Kind(Kind) {}
4775 ABIKind getABIKind() const { return Kind; }
4776 bool isDarwinPCS() const { return Kind == DarwinPCS; }
4778 ABIArgInfo classifyReturnType(QualType RetTy) const;
4779 ABIArgInfo classifyArgumentType(QualType RetTy) const;
4780 bool isHomogeneousAggregateBaseType(QualType Ty) const override;
4781 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
4782 uint64_t Members) const override;
4784 bool isIllegalVectorType(QualType Ty) const;
4786 void computeInfo(CGFunctionInfo &FI) const override {
4787 if (!getCXXABI().classifyReturnType(FI))
4788 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
4790 for (auto &it : FI.arguments())
4791 it.info = classifyArgumentType(it.type);
4794 Address EmitDarwinVAArg(Address VAListAddr, QualType Ty,
4795 CodeGenFunction &CGF) const;
4797 Address EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
4798 CodeGenFunction &CGF) const;
4800 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4801 QualType Ty) const override {
4802 return isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
4803 : EmitAAPCSVAArg(VAListAddr, Ty, CGF);
4806 bool shouldPassIndirectlyForSwift(CharUnits totalSize,
4807 ArrayRef<llvm::Type*> scalars,
4808 bool asReturnValue) const override {
4809 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
4811 bool isSwiftErrorInRegister() const override {
4812 return true;
4813 }
4814 };
4816 class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
4818 AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind Kind)
4819 : TargetCodeGenInfo(new AArch64ABIInfo(CGT, Kind)) {}
4821 StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
4822 return "mov\tfp, fp\t\t; marker for objc_retainAutoreleaseReturnValue";
4825 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
4826 return 31;
4827 }
4829 bool doesReturnSlotInterfereWithArgs() const override { return false; }
4833 ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const {
4834 Ty = useFirstFieldIfTransparentUnion(Ty);
4836 // Handle illegal vector types here.
4837 if (isIllegalVectorType(Ty)) {
4838 uint64_t Size = getContext().getTypeSize(Ty);
4839 // Android promotes <2 x i8> to i16, not i32
4840 if (isAndroid() && (Size <= 16)) {
4841 llvm::Type *ResType = llvm::Type::getInt16Ty(getVMContext());
4842 return ABIArgInfo::getDirect(ResType);
4843 }
4844 if (Size <= 32) {
4845 llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
4846 return ABIArgInfo::getDirect(ResType);
4847 }
4848 if (Size == 64) {
4849 llvm::Type *ResType =
4850 llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
4851 return ABIArgInfo::getDirect(ResType);
4852 }
4853 if (Size == 128) {
4854 llvm::Type *ResType =
4855 llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
4856 return ABIArgInfo::getDirect(ResType);
4857 }
4858 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
4859 }
4861 if (!isAggregateTypeForABI(Ty)) {
4862 // Treat an enum type as its underlying type.
4863 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
4864 Ty = EnumTy->getDecl()->getIntegerType();
4866 return (Ty->isPromotableIntegerType() && isDarwinPCS()
4867 ? ABIArgInfo::getExtend()
4868 : ABIArgInfo::getDirect());
4871 // Structures with either a non-trivial destructor or a non-trivial
4872 // copy constructor are always indirect.
4873 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
4874 return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
4875 CGCXXABI::RAA_DirectInMemory);
4878 // Empty records are always ignored on Darwin, but actually passed in C++ mode
4879 // elsewhere for GNU compatibility.
4880 if (isEmptyRecord(getContext(), Ty, true)) {
4881 if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
4882 return ABIArgInfo::getIgnore();
4884 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
4887 // Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
4888 const Type *Base = nullptr;
4889 uint64_t Members = 0;
4890 if (isHomogeneousAggregate(Ty, Base, Members)) {
4891 return ABIArgInfo::getDirect(
4892 llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members));
4895 // Aggregates <= 16 bytes are passed directly in registers or on the stack.
4896 uint64_t Size = getContext().getTypeSize(Ty);
4897 if (Size <= 128) {
4898 // On RenderScript, coerce aggregates <= 16 bytes to an integer array of
4899 // the same size and alignment.
4900 if (getTarget().isRenderScriptTarget()) {
4901 return coerceToIntArray(Ty, getContext(), getVMContext());
4902 }
4903 unsigned Alignment = getContext().getTypeAlign(Ty);
4904 Size = 64 * ((Size + 63) / 64); // round up to multiple of 8 bytes
4906 // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
4907 // For aggregates with 16-byte alignment, we use i128.
4908 if (Alignment < 128 && Size == 128) {
4909 llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
4910 return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
4912 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
4915 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
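// Illustrative note (added): a struct of three ints (96 bits) is rounded up
// to 128 bits above; since its alignment is below 16 bytes it becomes
// [2 x i64], whereas a 16-byte-aligned aggregate of the same size would be
// passed as i128.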
4918 ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy) const {
4919 if (RetTy->isVoidType())
4920 return ABIArgInfo::getIgnore();
4922 // Large vector types should be returned via memory.
4923 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
4924 return getNaturalAlignIndirect(RetTy);
4926 if (!isAggregateTypeForABI(RetTy)) {
4927 // Treat an enum type as its underlying type.
4928 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
4929 RetTy = EnumTy->getDecl()->getIntegerType();
4931 return (RetTy->isPromotableIntegerType() && isDarwinPCS()
4932 ? ABIArgInfo::getExtend()
4933 : ABIArgInfo::getDirect());
4936 if (isEmptyRecord(getContext(), RetTy, true))
4937 return ABIArgInfo::getIgnore();
4939 const Type *Base = nullptr;
4940 uint64_t Members = 0;
4941 if (isHomogeneousAggregate(RetTy, Base, Members))
4942 // Homogeneous Floating-point Aggregates (HFAs) are returned directly.
4943 return ABIArgInfo::getDirect();
4945 // Aggregates <= 16 bytes are returned directly in registers or on the stack.
4946 uint64_t Size = getContext().getTypeSize(RetTy);
4947 if (Size <= 128) {
4948 // On RenderScript, coerce aggregates <= 16 bytes to an integer array of
4949 // the same size and alignment.
4950 if (getTarget().isRenderScriptTarget()) {
4951 return coerceToIntArray(RetTy, getContext(), getVMContext());
4952 }
4953 unsigned Alignment = getContext().getTypeAlign(RetTy);
4954 Size = 64 * ((Size + 63) / 64); // round up to multiple of 8 bytes
4956 // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
4957 // For aggregates with 16-byte alignment, we use i128.
4958 if (Alignment < 128 && Size == 128) {
4959 llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
4960 return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
4962 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
4965 return getNaturalAlignIndirect(RetTy);
4968 /// isIllegalVectorType - check whether the vector type is legal for AArch64.
4969 bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const {
4970 if (const VectorType *VT = Ty->getAs<VectorType>()) {
4971 // Check whether VT is legal.
4972 unsigned NumElements = VT->getNumElements();
4973 uint64_t Size = getContext().getTypeSize(VT);
4974 // NumElements should be a power of 2.
4975 if (!llvm::isPowerOf2_32(NumElements))
4976 return true;
4977 return Size != 64 && (Size != 128 || NumElements == 1);
4978 }
4979 return false;
4980 }
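// Illustrative note (added): <2 x float> (64 bits) and <4 x i32> (128 bits)
// are legal here, while <3 x float> fails the power-of-2 element check and
// <1 x i128> fails the single-element test, so both get the fixups in
// classifyArgumentType above.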
4982 bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
4983 // Homogeneous aggregates for AAPCS64 must have base types of a floating
4984 // point type or a short-vector type. This is the same as the 32-bit ABI,
4985 // but with the difference that any floating-point type is allowed,
4986 // including __fp16.
4987 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
4988 if (BT->isFloatingPoint())
4989 return true;
4990 } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
4991 unsigned VecSize = getContext().getTypeSize(VT);
4992 if (VecSize == 64 || VecSize == 128)
4993 return true;
4994 }
4995 return false;
4996 }
4999 uint64_t Members) const {
5000 return Members <= 4;
5003 Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr,
5004 QualType Ty,
5005 CodeGenFunction &CGF) const {
5006 ABIArgInfo AI = classifyArgumentType(Ty);
5007 bool IsIndirect = AI.isIndirect();
5009 llvm::Type *BaseTy = CGF.ConvertType(Ty);
5010 if (IsIndirect)
5011 BaseTy = llvm::PointerType::getUnqual(BaseTy);
5012 else if (AI.getCoerceToType())
5013 BaseTy = AI.getCoerceToType();
5015 unsigned NumRegs = 1;
5016 if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
5017 BaseTy = ArrTy->getElementType();
5018 NumRegs = ArrTy->getNumElements();
5020 bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy();
5022 // The AArch64 va_list type and handling is specified in the Procedure Call
5023 // Standard, section B.4:
5024 //
5025 // struct {
5026 //   void *__stack;
5027 //   void *__gr_top;
5028 //   void *__vr_top;
5029 //   int __gr_offs;
5030 //   int __vr_offs;
5031 // };
5033 llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
5034 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
5035 llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
5036 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
5038 auto TyInfo = getContext().getTypeInfoInChars(Ty);
5039 CharUnits TyAlign = TyInfo.second;
5041 Address reg_offs_p = Address::invalid();
5042 llvm::Value *reg_offs = nullptr;
5043 int reg_top_index;
5044 CharUnits reg_top_offset;
5045 int RegSize = IsIndirect ? 8 : TyInfo.first.getQuantity();
5047 // 3 is the field number of __gr_offs
5049 CGF.Builder.CreateStructGEP(VAListAddr, 3, CharUnits::fromQuantity(24),
5051 reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
5052 reg_top_index = 1; // field number for __gr_top
5053 reg_top_offset = CharUnits::fromQuantity(8);
5054 RegSize = llvm::alignTo(RegSize, 8);
5056 // 4 is the field number of __vr_offs.
5058 CGF.Builder.CreateStructGEP(VAListAddr, 4, CharUnits::fromQuantity(28),
5060 reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
5061 reg_top_index = 2; // field number for __vr_top
5062 reg_top_offset = CharUnits::fromQuantity(16);
5063 RegSize = 16 * NumRegs;
5066 //=======================================
5067 // Find out where argument was passed
5068 //=======================================
5070 // If reg_offs >= 0 we're already using the stack for this type of
5071 // argument. We don't want to keep updating reg_offs (in case it overflows,
5072 // though anyone passing 2GB of arguments, each at most 16 bytes, deserves
5073 // whatever they get).
5074 llvm::Value *UsingStack = nullptr;
5075 UsingStack = CGF.Builder.CreateICmpSGE(
5076 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));
5078 CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);
5080 // Otherwise, at least some kind of argument could go in these registers, the
5081 // question is whether this particular type is too big.
5082 CGF.EmitBlock(MaybeRegBlock);
5084 // Integer arguments may need to correct register alignment (for example a
5085 // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we
5086 // align __gr_offs to calculate the potential address.
5087 if (!IsFPR && !IsIndirect && TyAlign.getQuantity() > 8) {
5088 int Align = TyAlign.getQuantity();
5090 reg_offs = CGF.Builder.CreateAdd(
5091 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
5093 reg_offs = CGF.Builder.CreateAnd(
5094 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align),
5098 // Update the gr_offs/vr_offs pointer for next call to va_arg on this va_list.
5099 // The fact that this is done unconditionally reflects the fact that
5100 // allocating an argument to the stack also uses up all the remaining
5101 // registers of the appropriate kind.
5102 llvm::Value *NewOffset = nullptr;
5103 NewOffset = CGF.Builder.CreateAdd(
5104 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
5105 CGF.Builder.CreateStore(NewOffset, reg_offs_p);
5107 // Now we're in a position to decide whether this argument really was in
5108 // registers or not.
5109 llvm::Value *InRegs = nullptr;
5110 InRegs = CGF.Builder.CreateICmpSLE(
5111 NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");
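// Illustrative note (added): if __gr_offs was -56 (seven GPR slots left) and
// the argument needs one 8-byte GPR, NewOffset is -48 <= 0, so the value is
// loaded from the register save area at __gr_top - 56.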
5113 CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);
5115 //=======================================
5116 // Argument was in registers
5117 //=======================================
5119 // Now we emit the code for if the argument was originally passed in
5120 // registers. First start the appropriate block:
5121 CGF.EmitBlock(InRegBlock);
5123 llvm::Value *reg_top = nullptr;
5124 Address reg_top_p = CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index,
5125 reg_top_offset, "reg_top_p");
5126 reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
5127 Address BaseAddr(CGF.Builder.CreateInBoundsGEP(reg_top, reg_offs),
5128 CharUnits::fromQuantity(IsFPR ? 16 : 8));
5129 Address RegAddr = Address::invalid();
5130 llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty);
5132 if (IsIndirect) {
5133 // If it's been passed indirectly (actually a struct), whatever we find from
5134 // stored registers or on the stack will actually be a struct **.
5135 MemTy = llvm::PointerType::getUnqual(MemTy);
5136 }
5138 const Type *Base = nullptr;
5139 uint64_t NumMembers = 0;
5140 bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers);
5141 if (IsHFA && NumMembers > 1) {
5142 // Homogeneous aggregates passed in registers will have their elements split
5143 // and stored 16 bytes apart regardless of size (they're notionally in qN,
5144 // qN+1, ...). We reload and store into a temporary local variable
5145 // contiguously, and only then load from that temporary into the result.
5146 assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
5147 auto BaseTyInfo = getContext().getTypeInfoInChars(QualType(Base, 0));
5148 llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
5149 llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
5150 Address Tmp = CGF.CreateTempAlloca(HFATy,
5151 std::max(TyAlign, BaseTyInfo.second));
5153 // On big-endian platforms, the value will be right-aligned in its slot.
5154 int Offset = 0;
5155 if (CGF.CGM.getDataLayout().isBigEndian() &&
5156 BaseTyInfo.first.getQuantity() < 16)
5157 Offset = 16 - BaseTyInfo.first.getQuantity();
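// For instance (a sketch): an HFA of floats on a big-endian target has a
// 4-byte base type, so Offset becomes 16 - 4 == 12 and each element is read
// from the high end of its 16-byte register save slot.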
5159 for (unsigned i = 0; i < NumMembers; ++i) {
5160 CharUnits BaseOffset = CharUnits::fromQuantity(16 * i + Offset);
5161 Address LoadAddr =
5162 CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, BaseOffset);
5163 LoadAddr = CGF.Builder.CreateElementBitCast(LoadAddr, BaseTy);
5165 Address StoreAddr =
5166 CGF.Builder.CreateConstArrayGEP(Tmp, i, BaseTyInfo.first);
5168 llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
5169 CGF.Builder.CreateStore(Elem, StoreAddr);
5170 }
5172 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, MemTy);
5173 } else {
5174 // Otherwise the object is contiguous in memory.
5176 // It might be right-aligned in its slot.
5177 CharUnits SlotSize = BaseAddr.getAlignment();
5178 if (CGF.CGM.getDataLayout().isBigEndian() && !IsIndirect &&
5179 (IsHFA || !isAggregateTypeForABI(Ty)) &&
5180 TyInfo.first < SlotSize) {
5181 CharUnits Offset = SlotSize - TyInfo.first;
5182 BaseAddr = CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, Offset);
5183 }
5185 RegAddr = CGF.Builder.CreateElementBitCast(BaseAddr, MemTy);
5186 }
5188 CGF.EmitBranch(ContBlock);
5190 //=======================================
5191 // Argument was on the stack
5192 //=======================================
5193 CGF.EmitBlock(OnStackBlock);
5195 Address stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0,
5196 CharUnits::Zero(), "stack_p");
5197 llvm::Value *OnStackPtr = CGF.Builder.CreateLoad(stack_p, "stack");
5199 // Again, stack arguments may need realignment. In this case both integer and
5200 // floating-point ones might be affected.
5201 if (!IsIndirect && TyAlign.getQuantity() > 8) {
5202 int Align = TyAlign.getQuantity();
5204 OnStackPtr = CGF.Builder.CreatePtrToInt(OnStackPtr, CGF.Int64Ty);
5206 OnStackPtr = CGF.Builder.CreateAdd(
5207 OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
5208 "align_stack");
5209 OnStackPtr = CGF.Builder.CreateAnd(
5210 OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),
5211 "align_stack");
5213 OnStackPtr = CGF.Builder.CreateIntToPtr(OnStackPtr, CGF.Int8PtrTy);
5214 }
5215 Address OnStackAddr(OnStackPtr,
5216 std::max(CharUnits::fromQuantity(8), TyAlign));
5218 // All stack slots are multiples of 8 bytes.
5219 CharUnits StackSlotSize = CharUnits::fromQuantity(8);
5220 CharUnits StackSize;
5221 if (IsIndirect)
5222 StackSize = StackSlotSize;
5223 else
5224 StackSize = TyInfo.first.alignTo(StackSlotSize);
5226 llvm::Value *StackSizeC = CGF.Builder.getSize(StackSize);
5227 llvm::Value *NewStack =
5228 CGF.Builder.CreateInBoundsGEP(OnStackPtr, StackSizeC, "new_stack");
5230 // Write the new value of __stack for the next call to va_arg
5231 CGF.Builder.CreateStore(NewStack, stack_p);
5233 if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
5234 TyInfo.first < StackSlotSize) {
5235 CharUnits Offset = StackSlotSize - TyInfo.first;
5236 OnStackAddr = CGF.Builder.CreateConstInBoundsByteGEP(OnStackAddr, Offset);
5237 }
5239 OnStackAddr = CGF.Builder.CreateElementBitCast(OnStackAddr, MemTy);
5241 CGF.EmitBranch(ContBlock);
5243 //=======================================
5244 // Tie everything together
5245 //=======================================
5246 CGF.EmitBlock(ContBlock);
5248 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock,
5249 OnStackAddr, OnStackBlock, "vaargs.addr");
5251 if (IsIndirect)
5252 return Address(CGF.Builder.CreateLoad(ResAddr, "vaarg.addr"),
5253 TyInfo.second);
5255 return ResAddr;
5256 }
5258 Address AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty,
5259 CodeGenFunction &CGF) const {
5260 // The backend's lowering doesn't support va_arg for aggregates or
5261 // illegal vector types. Lower VAArg here for these cases and use
5262 // the LLVM va_arg instruction for everything else.
5263 if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty))
5264 return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());
5266 CharUnits SlotSize = CharUnits::fromQuantity(8);
5268 // Empty records are ignored for parameter passing purposes.
5269 if (isEmptyRecord(getContext(), Ty, true)) {
5270 Address Addr(CGF.Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize);
5271 Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
5272 return Addr;
5273 }
5275 // The size of the actual thing passed, which might end up just
5276 // being a pointer for indirect types.
5277 auto TyInfo = getContext().getTypeInfoInChars(Ty);
5279 // Arguments bigger than 16 bytes which aren't homogeneous
5280 // aggregates should be passed indirectly.
5281 bool IsIndirect = false;
5282 if (TyInfo.first.getQuantity() > 16) {
5283 const Type *Base = nullptr;
5284 uint64_t Members = 0;
5285 IsIndirect = !isHomogeneousAggregate(Ty, Base, Members);
5286 }
5288 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
5289 TyInfo, SlotSize, /*AllowHigherAlign*/ true);
5290 }
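// Illustration (a sketch, not normative): on Darwin the va_list is a plain
// char *, so for a 24-byte non-HFA struct the 8-byte slot holds a pointer
// that is then dereferenced, while a 16-byte struct is read directly out of
// the argument area.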
5292 //===----------------------------------------------------------------------===//
5293 // ARM ABI Implementation
5294 //===----------------------------------------------------------------------===//
5298 class ARMABIInfo : public SwiftABIInfo {
5299 public:
5300 enum ABIKind {
5301 APCS = 0,
5302 AAPCS = 1,
5303 AAPCS_VFP = 2,
5304 AAPCS16_VFP = 3,
5305 };
5307 private:
5308 ABIKind Kind;
5310 public:
5311 ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind)
5312 : SwiftABIInfo(CGT), Kind(_Kind) {
5313 setCCs();
5314 }
5316 bool isEABI() const {
5317 switch (getTarget().getTriple().getEnvironment()) {
5318 case llvm::Triple::Android:
5319 case llvm::Triple::EABI:
5320 case llvm::Triple::EABIHF:
5321 case llvm::Triple::GNUEABI:
5322 case llvm::Triple::GNUEABIHF:
5323 case llvm::Triple::MuslEABI:
5324 case llvm::Triple::MuslEABIHF:
5325 return true;
5326 default:
5327 return false;
5328 }
5329 }
5331 bool isEABIHF() const {
5332 switch (getTarget().getTriple().getEnvironment()) {
5333 case llvm::Triple::EABIHF:
5334 case llvm::Triple::GNUEABIHF:
5335 case llvm::Triple::MuslEABIHF:
5336 return true;
5337 default:
5338 return false;
5339 }
5340 }
5342 ABIKind getABIKind() const { return Kind; }
5344 private:
5345 ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic) const;
5346 ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic) const;
5347 bool isIllegalVectorType(QualType Ty) const;
5349 bool isHomogeneousAggregateBaseType(QualType Ty) const override;
5350 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
5351 uint64_t Members) const override;
5353 void computeInfo(CGFunctionInfo &FI) const override;
5355 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
5356 QualType Ty) const override;
5358 llvm::CallingConv::ID getLLVMDefaultCC() const;
5359 llvm::CallingConv::ID getABIDefaultCC() const;
5360 void setCCs();
5362 bool shouldPassIndirectlyForSwift(CharUnits totalSize,
5363 ArrayRef<llvm::Type*> scalars,
5364 bool asReturnValue) const override {
5365 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
5366 }
5367 bool isSwiftErrorInRegister() const override {
5368 return true;
5369 }
5370 };
5372 class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
5374 ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
5375 :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {}
5377 const ARMABIInfo &getABIInfo() const {
5378 return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo());
5381 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
5382 return 13;
5383 }
5385 StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
5386 return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue";
5387 }
5389 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
5390 llvm::Value *Address) const override {
5391 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
5393 // 0-15 are the 16 integer registers.
5394 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15);
5395 return false;
5396 }
5398 unsigned getSizeOfUnwindException() const override {
5399 if (getABIInfo().isEABI()) return 88;
5400 return TargetCodeGenInfo::getSizeOfUnwindException();
5401 }
5403 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5404 CodeGen::CodeGenModule &CGM) const override {
5405 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
5406 if (!FD)
5407 return;
5409 const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>();
5410 if (!Attr)
5411 return;
5413 const char *Kind;
5414 switch (Attr->getInterrupt()) {
5415 case ARMInterruptAttr::Generic: Kind = ""; break;
5416 case ARMInterruptAttr::IRQ: Kind = "IRQ"; break;
5417 case ARMInterruptAttr::FIQ: Kind = "FIQ"; break;
5418 case ARMInterruptAttr::SWI: Kind = "SWI"; break;
5419 case ARMInterruptAttr::ABORT: Kind = "ABORT"; break;
5420 case ARMInterruptAttr::UNDEF: Kind = "UNDEF"; break;
5421 }
5423 llvm::Function *Fn = cast<llvm::Function>(GV);
5425 Fn->addFnAttr("interrupt", Kind);
5427 ARMABIInfo::ABIKind ABI = cast<ARMABIInfo>(getABIInfo()).getABIKind();
5428 if (ABI == ARMABIInfo::APCS)
5429 return;
5431 // AAPCS guarantees that sp will be 8-byte aligned on any public interface,
5432 // however this is not necessarily true on taking any interrupt. Instruct
5433 // the backend to perform a realignment as part of the function prologue.
5434 llvm::AttrBuilder B;
5435 B.addStackAlignmentAttr(8);
5436 Fn->addAttributes(llvm::AttributeSet::FunctionIndex,
5437 llvm::AttributeSet::get(CGM.getLLVMContext(),
5438 llvm::AttributeSet::FunctionIndex,
5439 B));
5440 }
5441 };
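// Usage sketch (illustrative source, not part of this file): a handler like
//   void __attribute__((interrupt("IRQ"))) irq_handler(void);
// receives the "interrupt"="IRQ" function attribute, and on AAPCS targets
// also an 8-byte stack realignment, because the interrupted code only
// guarantees 4-byte sp alignment at the point of the exception.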
5443 class WindowsARMTargetCodeGenInfo : public ARMTargetCodeGenInfo {
5445 WindowsARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
5446 : ARMTargetCodeGenInfo(CGT, K) {}
5448 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5449 CodeGen::CodeGenModule &CGM) const override;
5451 void getDependentLibraryOption(llvm::StringRef Lib,
5452 llvm::SmallString<24> &Opt) const override {
5453 Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
5454 }
5456 void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
5457 llvm::SmallString<32> &Opt) const override {
5458 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
5459 }
5460 };
5462 void WindowsARMTargetCodeGenInfo::setTargetAttributes(
5463 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
5464 ARMTargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
5465 addStackProbeSizeTargetAttribute(D, GV, CGM);
5466 }
5469 void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
5470 if (!getCXXABI().classifyReturnType(FI))
5471 FI.getReturnInfo() =
5472 classifyReturnType(FI.getReturnType(), FI.isVariadic());
5474 for (auto &I : FI.arguments())
5475 I.info = classifyArgumentType(I.type, FI.isVariadic());
5477 // Always honor user-specified calling convention.
5478 if (FI.getCallingConvention() != llvm::CallingConv::C)
5479 return;
5481 llvm::CallingConv::ID cc = getRuntimeCC();
5482 if (cc != llvm::CallingConv::C)
5483 FI.setEffectiveCallingConvention(cc);
5484 }
5486 /// Return the default calling convention that LLVM will use.
5487 llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const {
5488 // The default calling convention that LLVM will infer.
5489 if (isEABIHF() || getTarget().getTriple().isWatchABI())
5490 return llvm::CallingConv::ARM_AAPCS_VFP;
5491 else if (isEABI())
5492 return llvm::CallingConv::ARM_AAPCS;
5493 else
5494 return llvm::CallingConv::ARM_APCS;
5495 }
5497 /// Return the calling convention that our ABI would like us to use
5498 /// as the C calling convention.
5499 llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const {
5500 switch (getABIKind()) {
5501 case APCS: return llvm::CallingConv::ARM_APCS;
5502 case AAPCS: return llvm::CallingConv::ARM_AAPCS;
5503 case AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
5504 case AAPCS16_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
5505 }
5506 llvm_unreachable("bad ABI kind");
5507 }
5509 void ARMABIInfo::setCCs() {
5510 assert(getRuntimeCC() == llvm::CallingConv::C);
5512 // Don't muddy up the IR with a ton of explicit annotations if
5513 // they'd just match what LLVM will infer from the triple.
5514 llvm::CallingConv::ID abiCC = getABIDefaultCC();
5515 if (abiCC != getLLVMDefaultCC())
5516 RuntimeCC = abiCC;
5518 // AAPCS apparently requires runtime support functions to be soft-float, but
5519 // that's almost certainly for historic reasons (Thumb1 not supporting VFP
5520 // most likely). It's more convenient for AAPCS16_VFP to be hard-float.
5521 switch (getABIKind()) {
5522 case APCS:
5523 case AAPCS16_VFP:
5524 if (abiCC != getLLVMDefaultCC())
5525 BuiltinCC = abiCC;
5526 break;
5528 default:
5529 BuiltinCC = llvm::CallingConv::ARM_AAPCS;
5530 break;
5531 }
5532 }
5534 ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
5535 bool isVariadic) const {
5536 // 6.1.2.1 The following argument types are VFP CPRCs:
5537 // A single-precision floating-point type (including promoted
5538 // half-precision types); A double-precision floating-point type;
5539 // A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate
5540 // with a Base Type of a single- or double-precision floating-point type,
5541 // 64-bit containerized vectors or 128-bit containerized vectors with one
5542 // to four Elements.
5543 bool IsEffectivelyAAPCS_VFP = getABIKind() == AAPCS_VFP && !isVariadic;
5545 Ty = useFirstFieldIfTransparentUnion(Ty);
5547 // Handle illegal vector types here.
5548 if (isIllegalVectorType(Ty)) {
5549 uint64_t Size = getContext().getTypeSize(Ty);
5550 if (Size <= 32) {
5551 llvm::Type *ResType =
5552 llvm::Type::getInt32Ty(getVMContext());
5553 return ABIArgInfo::getDirect(ResType);
5554 }
5555 if (Size == 64) {
5556 llvm::Type *ResType = llvm::VectorType::get(
5557 llvm::Type::getInt32Ty(getVMContext()), 2);
5558 return ABIArgInfo::getDirect(ResType);
5559 }
5560 if (Size == 128) {
5561 llvm::Type *ResType = llvm::VectorType::get(
5562 llvm::Type::getInt32Ty(getVMContext()), 4);
5563 return ABIArgInfo::getDirect(ResType);
5564 }
5565 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
5566 }
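// Illustrative mapping (a sketch, using Clang's padded vector sizes): a
// <3 x i8> occupies 32 bits and is coerced to i32, a <3 x i16> occupies 64
// bits and becomes <2 x i32>, a <3 x i32> occupies 128 bits and becomes
// <4 x i32>, and anything larger is passed indirectly.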
5568 // __fp16 gets passed as if it were an int or float, but with the top 16 bits
5569 // unspecified. This is not done for OpenCL as it handles the half type
5570 // natively, and does not need to interwork with AAPCS code.
5571 if (Ty->isHalfType() && !getContext().getLangOpts().NativeHalfArgsAndReturns) {
5572 llvm::Type *ResType = IsEffectivelyAAPCS_VFP ?
5573 llvm::Type::getFloatTy(getVMContext()) :
5574 llvm::Type::getInt32Ty(getVMContext());
5575 return ABIArgInfo::getDirect(ResType);
5576 }
5578 if (!isAggregateTypeForABI(Ty)) {
5579 // Treat an enum type as its underlying type.
5580 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
5581 Ty = EnumTy->getDecl()->getIntegerType();
5582 }
5584 return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend()
5585 : ABIArgInfo::getDirect());
5586 }
5588 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
5589 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
5590 }
5592 // Ignore empty records.
5593 if (isEmptyRecord(getContext(), Ty, true))
5594 return ABIArgInfo::getIgnore();
5596 if (IsEffectivelyAAPCS_VFP) {
5597 // Homogeneous Aggregates need to be expanded when we can fit the aggregate
5598 // into VFP registers.
5599 const Type *Base = nullptr;
5600 uint64_t Members = 0;
5601 if (isHomogeneousAggregate(Ty, Base, Members)) {
5602 assert(Base && "Base class should be set for homogeneous aggregate");
5603 // Base can be a floating-point or a vector.
5604 return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
5605 }
5606 } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
5607 // WatchOS does have homogeneous aggregates. Note that we intentionally use
5608 // this convention even for a variadic function: the backend will use GPRs
5609 // if needed.
5610 const Type *Base = nullptr;
5611 uint64_t Members = 0;
5612 if (isHomogeneousAggregate(Ty, Base, Members)) {
5613 assert(Base && Members <= 4 && "unexpected homogeneous aggregate");
5614 llvm::Type *Ty =
5615 llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members);
5616 return ABIArgInfo::getDirect(Ty, 0, nullptr, false);
5617 }
5618 }
5620 if (getABIKind() == ARMABIInfo::AAPCS16_VFP &&
5621 getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(16)) {
5622 // WatchOS is adopting the 64-bit AAPCS rule on composite types: if they're
5623 // bigger than 128-bits, they get placed in space allocated by the caller,
5624 // and a pointer is passed.
5625 return ABIArgInfo::getIndirect(
5626 CharUnits::fromQuantity(getContext().getTypeAlign(Ty) / 8), false);
5627 }
5629 // Support byval for ARM.
5630 // The ABI alignment for APCS is 4-byte and for AAPCS at least 4-byte and at
5631 // most 8-byte. We realign the indirect argument if type alignment is bigger
5632 // than ABI alignment.
5633 uint64_t ABIAlign = 4;
5634 uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8;
5635 if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
5636 getABIKind() == ARMABIInfo::AAPCS)
5637 ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
5639 if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) {
5640 assert(getABIKind() != ARMABIInfo::AAPCS16_VFP && "unexpected byval");
5641 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
5642 /*ByVal=*/true,
5643 /*Realign=*/TyAlign > ABIAlign);
5644 }
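// Worked example (a sketch): a 72-byte struct with 16-byte alignment under
// AAPCS gets ABIAlign = min(max(16, 4), 8) = 8; it exceeds the 64-byte
// limit, so it is passed byval at 8-byte alignment with Realign = true.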
5646 // On RenderScript, coerce Aggregates <= 64 bytes to an integer array of
5647 // same size and alignment.
5648 if (getTarget().isRenderScriptTarget()) {
5649 return coerceToIntArray(Ty, getContext(), getVMContext());
5650 }
5652 // Otherwise, pass by coercing to a structure of the appropriate size.
5653 llvm::Type *ElemTy;
5654 unsigned SizeRegs;
5655 // FIXME: Try to match the types of the arguments more accurately where
5656 // we can.
5657 if (getContext().getTypeAlign(Ty) <= 32) {
5658 ElemTy = llvm::Type::getInt32Ty(getVMContext());
5659 SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
5660 } else {
5661 ElemTy = llvm::Type::getInt64Ty(getVMContext());
5662 SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
5663 }
5665 return ABIArgInfo::getDirect(llvm::ArrayType::get(ElemTy, SizeRegs));
5666 }
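// For example (a sketch): a 12-byte struct with 4-byte alignment is coerced
// to [3 x i32], while a 16-byte struct containing an 8-byte-aligned member
// becomes [2 x i64].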
5668 static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
5669 llvm::LLVMContext &VMContext) {
5670 // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
5671 // is called integer-like if its size is less than or equal to one word, and
5672 // the offset of each of its addressable sub-fields is zero.
5674 uint64_t Size = Context.getTypeSize(Ty);
5676 // Check that the type fits in a word.
5677 if (Size > 32)
5678 return false;
5680 // FIXME: Handle vector types!
5681 if (Ty->isVectorType())
5682 return false;
5684 // Float types are never treated as "integer like".
5685 if (Ty->isRealFloatingType())
5686 return false;
5688 // If this is a builtin or pointer type then it is ok.
5689 if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
5690 return true;
5692 // Small complex integer types are "integer like".
5693 if (const ComplexType *CT = Ty->getAs<ComplexType>())
5694 return isIntegerLikeType(CT->getElementType(), Context, VMContext);
5696 // Single element and zero sized arrays should be allowed, by the definition
5697 // above, but they are not.
5699 // Otherwise, it must be a record type.
5700 const RecordType *RT = Ty->getAs<RecordType>();
5701 if (!RT) return false;
5703 // Ignore records with flexible arrays.
5704 const RecordDecl *RD = RT->getDecl();
5705 if (RD->hasFlexibleArrayMember())
5708 // Check that all sub-fields are at offset 0, and are themselves "integer
5709 // like".
5710 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
5712 bool HadField = false;
5713 unsigned idx = 0;
5714 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
5715 i != e; ++i, ++idx) {
5716 const FieldDecl *FD = *i;
5718 // Bit-fields are not addressable, we only need to verify they are "integer
5719 // like". We still have to disallow a subsequent non-bitfield, for example:
5720 // struct { int : 0; int x; }
5721 // is non-integer like according to gcc.
5722 if (FD->isBitField()) {
5723 if (!RD->isUnion())
5724 HadField = true;
5726 if (!isIntegerLikeType(FD->getType(), Context, VMContext))
5727 return false;
5729 continue;
5730 }
5732 // Check if this field is at offset 0.
5733 if (Layout.getFieldOffset(idx) != 0)
5734 return false;
5736 if (!isIntegerLikeType(FD->getType(), Context, VMContext))
5737 return false;
5739 // Only allow at most one field in a structure. This doesn't match the
5740 // wording above, but follows gcc in situations with a field following an
5741 // empty structure.
5742 if (!RD->isUnion()) {
5743 if (HadField)
5744 return false;
5746 HadField = true;
5747 }
5748 }
5750 return true;
5751 }
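// Examples under this definition (a sketch): "struct { short s; }" and
// "union { int i; char c; }" are integer-like, while "struct { short a, b; }"
// is not, because its second field sits at a nonzero offset.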
5753 ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
5754 bool isVariadic) const {
5755 bool IsEffectivelyAAPCS_VFP =
5756 (getABIKind() == AAPCS_VFP || getABIKind() == AAPCS16_VFP) && !isVariadic;
5758 if (RetTy->isVoidType())
5759 return ABIArgInfo::getIgnore();
5761 // Large vector types should be returned via memory.
5762 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) {
5763 return getNaturalAlignIndirect(RetTy);
5764 }
5766 // __fp16 gets returned as if it were an int or float, but with the top 16
5767 // bits unspecified. This is not done for OpenCL as it handles the half type
5768 // natively, and does not need to interwork with AAPCS code.
5769 if (RetTy->isHalfType() && !getContext().getLangOpts().NativeHalfArgsAndReturns) {
5770 llvm::Type *ResType = IsEffectivelyAAPCS_VFP ?
5771 llvm::Type::getFloatTy(getVMContext()) :
5772 llvm::Type::getInt32Ty(getVMContext());
5773 return ABIArgInfo::getDirect(ResType);
5774 }
5776 if (!isAggregateTypeForABI(RetTy)) {
5777 // Treat an enum type as its underlying type.
5778 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
5779 RetTy = EnumTy->getDecl()->getIntegerType();
5781 return RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend()
5782 : ABIArgInfo::getDirect();
5783 }
5785 // Are we following APCS?
5786 if (getABIKind() == APCS) {
5787 if (isEmptyRecord(getContext(), RetTy, false))
5788 return ABIArgInfo::getIgnore();
5790 // Complex types are all returned as packed integers.
5792 // FIXME: Consider using 2 x vector types if the back end handles them
5793 // correctly.
5794 if (RetTy->isAnyComplexType())
5795 return ABIArgInfo::getDirect(llvm::IntegerType::get(
5796 getVMContext(), getContext().getTypeSize(RetTy)));
5798 // Integer like structures are returned in r0.
5799 if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
5800 // Return in the smallest viable integer type.
5801 uint64_t Size = getContext().getTypeSize(RetTy);
5802 if (Size <= 8)
5803 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
5804 if (Size <= 16)
5805 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
5806 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
5807 }
5809 // Otherwise return in memory.
5810 return getNaturalAlignIndirect(RetTy);
5811 }
5813 // Otherwise this is an AAPCS variant.
5815 if (isEmptyRecord(getContext(), RetTy, true))
5816 return ABIArgInfo::getIgnore();
5818 // Check for homogeneous aggregates with AAPCS-VFP.
5819 if (IsEffectivelyAAPCS_VFP) {
5820 const Type *Base = nullptr;
5821 uint64_t Members = 0;
5822 if (isHomogeneousAggregate(RetTy, Base, Members)) {
5823 assert(Base && "Base class should be set for homogeneous aggregate");
5824 // Homogeneous Aggregates are returned directly.
5825 return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
5826 }
5827 }
5829 // Aggregates <= 4 bytes are returned in r0; other aggregates
5830 // are returned indirectly.
5831 uint64_t Size = getContext().getTypeSize(RetTy);
5832 if (Size <= 32) {
5833 // On RenderScript, coerce Aggregates <= 4 bytes to an integer array of
5834 // same size and alignment.
5835 if (getTarget().isRenderScriptTarget()) {
5836 return coerceToIntArray(RetTy, getContext(), getVMContext());
5837 }
5838 if (getDataLayout().isBigEndian())
5839 // Return in a 32-bit integer type (as if loaded by LDR, AAPCS 5.4).
5840 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
5842 // Return in the smallest viable integer type.
5843 if (Size <= 8)
5844 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
5845 if (Size <= 16)
5846 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
5847 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
5848 } else if (Size <= 128 && getABIKind() == AAPCS16_VFP) {
5849 llvm::Type *Int32Ty = llvm::Type::getInt32Ty(getVMContext());
5850 llvm::Type *CoerceTy =
5851 llvm::ArrayType::get(Int32Ty, llvm::alignTo(Size, 32) / 32);
5852 return ABIArgInfo::getDirect(CoerceTy);
5853 }
5855 return getNaturalAlignIndirect(RetTy);
5856 }
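// Worked example (a sketch): "struct { char a, b; }" is 16 bits, so a
// little-endian AAPCS target returns it as i16, while a big-endian target
// returns it as i32 so the bytes land where an LDR would place them.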
5858 /// isIllegalVector - check whether Ty is an illegal vector type.
5859 bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
5860 if (const VectorType *VT = Ty->getAs<VectorType>()) {
5861 if (isAndroid()) {
5862 // Android shipped using Clang 3.1, which supported a slightly different
5863 // vector ABI. The primary differences were that 3-element vector types
5864 // were legal, and so were sub 32-bit vectors (i.e. <2 x i8>). This path
5865 // accepts that legacy behavior for Android only.
5866 // Check whether VT is legal.
5867 unsigned NumElements = VT->getNumElements();
5868 // NumElements should be power of 2 or equal to 3.
5869 if (!llvm::isPowerOf2_32(NumElements) && NumElements != 3)
5870 return true;
5871 } else {
5872 // Check whether VT is legal.
5873 unsigned NumElements = VT->getNumElements();
5874 uint64_t Size = getContext().getTypeSize(VT);
5875 // NumElements should be power of 2.
5876 if (!llvm::isPowerOf2_32(NumElements))
5877 return true;
5878 // Size should be greater than 32 bits.
5879 return Size <= 32;
5880 }
5881 }
5883 return false;
5884 }
5885 bool ARMABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
5886 // Homogeneous aggregates for AAPCS-VFP must have base types of float,
5887 // double, or 64-bit or 128-bit vectors.
5888 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
5889 if (BT->getKind() == BuiltinType::Float ||
5890 BT->getKind() == BuiltinType::Double ||
5891 BT->getKind() == BuiltinType::LongDouble)
5892 return true;
5893 } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
5894 unsigned VecSize = getContext().getTypeSize(VT);
5895 if (VecSize == 64 || VecSize == 128)
5896 return true;
5897 }
5898 return false;
5899 }
5901 bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
5902 uint64_t Members) const {
5903 return Members <= 4;
5904 }
5906 Address ARMABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
5907 QualType Ty) const {
5908 CharUnits SlotSize = CharUnits::fromQuantity(4);
5910 // Empty records are ignored for parameter passing purposes.
5911 if (isEmptyRecord(getContext(), Ty, true)) {
5912 Address Addr(CGF.Builder.CreateLoad(VAListAddr), SlotSize);
5913 Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
5914 return Addr;
5915 }
5917 auto TyInfo = getContext().getTypeInfoInChars(Ty);
5918 CharUnits TyAlignForABI = TyInfo.second;
5920 // Use indirect if size of the illegal vector is bigger than 16 bytes.
5921 bool IsIndirect = false;
5922 const Type *Base = nullptr;
5923 uint64_t Members = 0;
5924 if (TyInfo.first > CharUnits::fromQuantity(16) && isIllegalVectorType(Ty)) {
5925 IsIndirect = true;
5927 // ARMv7k passes structs bigger than 16 bytes indirectly, in space
5928 // allocated by the caller.
5929 } else if (TyInfo.first > CharUnits::fromQuantity(16) &&
5930 getABIKind() == ARMABIInfo::AAPCS16_VFP &&
5931 !isHomogeneousAggregate(Ty, Base, Members)) {
5932 IsIndirect = true;
5934 // Otherwise, bound the type's ABI alignment.
5935 // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for
5936 // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte.
5937 // Our callers should be prepared to handle an under-aligned address.
5938 } else if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
5939 getABIKind() == ARMABIInfo::AAPCS) {
5940 TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
5941 TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(8));
5942 } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
5943 // ARMv7k allows type alignment up to 16 bytes.
5944 TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
5945 TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(16));
5946 } else {
5947 TyAlignForABI = CharUnits::fromQuantity(4);
5948 }
5949 TyInfo.second = TyAlignForABI;
5951 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo,
5952 SlotSize, /*AllowHigherAlign*/ true);
5953 }
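// Worked example (a sketch): for a type declared with
// __attribute__((aligned(16))) under AAPCS, TyAlignForABI is clamped to 8,
// so the emitted va_arg only rounds the argument pointer to 8 bytes and
// callers must tolerate the under-aligned result.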
5955 //===----------------------------------------------------------------------===//
5956 // NVPTX ABI Implementation
5957 //===----------------------------------------------------------------------===//
5961 class NVPTXABIInfo : public ABIInfo {
5962 public:
5963 NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
5965 ABIArgInfo classifyReturnType(QualType RetTy) const;
5966 ABIArgInfo classifyArgumentType(QualType Ty) const;
5968 void computeInfo(CGFunctionInfo &FI) const override;
5969 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
5970 QualType Ty) const override;
5971 };
5973 class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
5974 public:
5975 NVPTXTargetCodeGenInfo(CodeGenTypes &CGT)
5976 : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {}
5978 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5979 CodeGen::CodeGenModule &M) const override;
5981 // Creates an MDNode with F, Name, and Operand as operands, and adds
5982 // it to the nvvm.annotations named metadata node.
5983 static void addNVVMMetadata(llvm::Function *F, StringRef Name, int Operand);
5984 };
5986 ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
5987 if (RetTy->isVoidType())
5988 return ABIArgInfo::getIgnore();
5990 // Note: this differs from the default ABI; non-scalar types are returned directly.
5991 if (!RetTy->isScalarType())
5992 return ABIArgInfo::getDirect();
5994 // Treat an enum type as its underlying type.
5995 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
5996 RetTy = EnumTy->getDecl()->getIntegerType();
5998 return (RetTy->isPromotableIntegerType() ?
5999 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
6002 ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
6003 // Treat an enum type as its underlying type.
6004 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
6005 Ty = EnumTy->getDecl()->getIntegerType();
6007 // Pass aggregate types indirectly, byval.
6008 if (isAggregateTypeForABI(Ty))
6009 return getNaturalAlignIndirect(Ty, /* byval */ true);
6011 return (Ty->isPromotableIntegerType() ?
6012 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
6015 void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
6016 if (!getCXXABI().classifyReturnType(FI))
6017 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
6018 for (auto &I : FI.arguments())
6019 I.info = classifyArgumentType(I.type);
6021 // Always honor user-specified calling convention.
6022 if (FI.getCallingConvention() != llvm::CallingConv::C)
6023 return;
6025 FI.setEffectiveCallingConvention(getRuntimeCC());
6026 }
6028 Address NVPTXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6029 QualType Ty) const {
6030 llvm_unreachable("NVPTX does not support varargs");
6031 }
6033 void NVPTXTargetCodeGenInfo::
6034 setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
6035 CodeGen::CodeGenModule &M) const{
6036 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
6037 if (!FD) return;
6039 llvm::Function *F = cast<llvm::Function>(GV);
6041 // Perform special handling in OpenCL mode
6042 if (M.getLangOpts().OpenCL) {
6043 // Use OpenCL function attributes to check for kernel functions
6044 // By default, all functions are device functions
6045 if (FD->hasAttr<OpenCLKernelAttr>()) {
6046 // OpenCL __kernel functions get kernel metadata
6047 // Create !{<func-ref>, metadata !"kernel", i32 1} node
6048 addNVVMMetadata(F, "kernel", 1);
6049 // And kernel functions are not subject to inlining
6050 F->addFnAttr(llvm::Attribute::NoInline);
6051 }
6052 }
6054 // Perform special handling in CUDA mode.
6055 if (M.getLangOpts().CUDA) {
6056 // CUDA __global__ functions get a kernel metadata entry. Since
6057 // __global__ functions cannot be called from the device, we do not
6058 // need to set the noinline attribute.
6059 if (FD->hasAttr<CUDAGlobalAttr>()) {
6060 // Create !{<func-ref>, metadata !"kernel", i32 1} node
6061 addNVVMMetadata(F, "kernel", 1);
6062 }
6063 if (CUDALaunchBoundsAttr *Attr = FD->getAttr<CUDALaunchBoundsAttr>()) {
6064 // Create !{<func-ref>, metadata !"maxntidx", i32 <val>} node
6065 llvm::APSInt MaxThreads(32);
6066 MaxThreads = Attr->getMaxThreads()->EvaluateKnownConstInt(M.getContext());
6067 if (MaxThreads > 0)
6068 addNVVMMetadata(F, "maxntidx", MaxThreads.getExtValue());
6070 // min blocks is an optional argument for CUDALaunchBoundsAttr. If it was
6071 // not specified in __launch_bounds__ or if the user specified a 0 value,
6072 // we don't have to add a PTX directive.
6073 if (Attr->getMinBlocks()) {
6074 llvm::APSInt MinBlocks(32);
6075 MinBlocks = Attr->getMinBlocks()->EvaluateKnownConstInt(M.getContext());
6077 // Create !{<func-ref>, metadata !"minctasm", i32 <val>} node
6078 addNVVMMetadata(F, "minctasm", MinBlocks.getExtValue());
6079 }
6080 }
6081 }
6082 }
6084 void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::Function *F, StringRef Name,
6085 int Operand) {
6086 llvm::Module *M = F->getParent();
6087 llvm::LLVMContext &Ctx = M->getContext();
6089 // Get "nvvm.annotations" metadata node
6090 llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations");
6092 llvm::Metadata *MDVals[] = {
6093 llvm::ConstantAsMetadata::get(F), llvm::MDString::get(Ctx, Name),
6094 llvm::ConstantAsMetadata::get(
6095 llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))};
6096 // Append metadata to nvvm.annotations
6097 MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
6098 }
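// For reference (a sketch of the resulting IR, under typed-pointer syntax):
//   !nvvm.annotations = !{!0}
//   !0 = !{void ()* @my_kernel, !"kernel", i32 1}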
6101 //===----------------------------------------------------------------------===//
6102 // SystemZ ABI Implementation
6103 //===----------------------------------------------------------------------===//
6107 class SystemZABIInfo : public SwiftABIInfo {
6108 bool HasVector;
6110 public:
6111 SystemZABIInfo(CodeGenTypes &CGT, bool HV)
6112 : SwiftABIInfo(CGT), HasVector(HV) {}
6114 bool isPromotableIntegerType(QualType Ty) const;
6115 bool isCompoundType(QualType Ty) const;
6116 bool isVectorArgumentType(QualType Ty) const;
6117 bool isFPArgumentType(QualType Ty) const;
6118 QualType GetSingleElementType(QualType Ty) const;
6120 ABIArgInfo classifyReturnType(QualType RetTy) const;
6121 ABIArgInfo classifyArgumentType(QualType ArgTy) const;
6123 void computeInfo(CGFunctionInfo &FI) const override {
6124 if (!getCXXABI().classifyReturnType(FI))
6125 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
6126 for (auto &I : FI.arguments())
6127 I.info = classifyArgumentType(I.type);
6128 }
6130 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6131 QualType Ty) const override;
6133 bool shouldPassIndirectlyForSwift(CharUnits totalSize,
6134 ArrayRef<llvm::Type*> scalars,
6135 bool asReturnValue) const override {
6136 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
6137 }
6138 bool isSwiftErrorInRegister() const override {
6139 return false;
6140 }
6141 };
6143 class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
6145 SystemZTargetCodeGenInfo(CodeGenTypes &CGT, bool HasVector)
6146 : TargetCodeGenInfo(new SystemZABIInfo(CGT, HasVector)) {}
6147 };
6151 bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const {
6152 // Treat an enum type as its underlying type.
6153 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
6154 Ty = EnumTy->getDecl()->getIntegerType();
6156 // Promotable integer types are required to be promoted by the ABI.
6157 if (Ty->isPromotableIntegerType())
6158 return true;
6160 // 32-bit values must also be promoted.
6161 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
6162 switch (BT->getKind()) {
6163 case BuiltinType::Int:
6164 case BuiltinType::UInt:
6165 return true;
6166 default:
6167 return false;
6168 }
6170 return false;
6171 }
6172 bool SystemZABIInfo::isCompoundType(QualType Ty) const {
6173 return (Ty->isAnyComplexType() ||
6174 Ty->isVectorType() ||
6175 isAggregateTypeForABI(Ty));
6176 }
6178 bool SystemZABIInfo::isVectorArgumentType(QualType Ty) const {
6179 return (HasVector &&
6180 Ty->isVectorType() &&
6181 getContext().getTypeSize(Ty) <= 128);
6182 }
6184 bool SystemZABIInfo::isFPArgumentType(QualType Ty) const {
6185 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
6186 switch (BT->getKind()) {
6187 case BuiltinType::Float:
6188 case BuiltinType::Double:
6189 return true;
6190 default:
6191 return false;
6192 }
6194 return false;
6195 }
6197 QualType SystemZABIInfo::GetSingleElementType(QualType Ty) const {
6198 if (const RecordType *RT = Ty->getAsStructureType()) {
6199 const RecordDecl *RD = RT->getDecl();
6200 QualType Found;
6202 // If this is a C++ record, check the bases first.
6203 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
6204 for (const auto &I : CXXRD->bases()) {
6205 QualType Base = I.getType();
6207 // Empty bases don't affect things either way.
6208 if (isEmptyRecord(getContext(), Base, true))
6209 continue;
6211 if (!Found.isNull())
6212 return Ty;
6213 Found = GetSingleElementType(Base);
6214 }
6216 // Check the fields.
6217 for (const auto *FD : RD->fields()) {
6218 // For compatibility with GCC, ignore empty bitfields in C++ mode.
6219 // Unlike isSingleElementStruct(), empty structure and array fields
6220 // do count. So do anonymous bitfields that aren't zero-sized.
6221 if (getContext().getLangOpts().CPlusPlus &&
6222 FD->isBitField() && FD->getBitWidthValue(getContext()) == 0)
6223 continue;
6225 // Unlike isSingleElementStruct(), arrays do not count.
6226 // Nested structures still do though.
6227 if (!Found.isNull())
6228 return Ty;
6229 Found = GetSingleElementType(FD->getType());
6230 }
6232 // Unlike isSingleElementStruct(), trailing padding is allowed.
6233 // An 8-byte aligned struct s { float f; } is passed as a double.
6234 if (!Found.isNull())
6235 return Found;
6236 }
6238 return Ty;
6239 }
6241 Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6242 QualType Ty) const {
6243 // Assume that va_list type is correct; should be pointer to LLVM type:
6244 // struct {
6245 // i64 __gpr;
6246 // i64 __fpr;
6247 // i8 *__overflow_arg_area;
6248 // i8 *__reg_save_area;
6249 // };
6251 // Every non-vector argument occupies 8 bytes and is passed by preference
6252 // in either GPRs or FPRs. Vector arguments occupy 8 or 16 bytes and are
6253 // always passed on the stack.
6254 Ty = getContext().getCanonicalType(Ty);
6255 auto TyInfo = getContext().getTypeInfoInChars(Ty);
6256 llvm::Type *ArgTy = CGF.ConvertTypeForMem(Ty);
6257 llvm::Type *DirectTy = ArgTy;
6258 ABIArgInfo AI = classifyArgumentType(Ty);
6259 bool IsIndirect = AI.isIndirect();
6260 bool InFPRs = false;
6261 bool IsVector = false;
6262 CharUnits UnpaddedSize;
6263 CharUnits DirectAlign;
6264 if (IsIndirect) {
6265 DirectTy = llvm::PointerType::getUnqual(DirectTy);
6266 UnpaddedSize = DirectAlign = CharUnits::fromQuantity(8);
6267 } else {
6268 if (AI.getCoerceToType())
6269 ArgTy = AI.getCoerceToType();
6270 InFPRs = ArgTy->isFloatTy() || ArgTy->isDoubleTy();
6271 IsVector = ArgTy->isVectorTy();
6272 UnpaddedSize = TyInfo.first;
6273 DirectAlign = TyInfo.second;
6274 }
6275 CharUnits PaddedSize = CharUnits::fromQuantity(8);
6276 if (IsVector && UnpaddedSize > PaddedSize)
6277 PaddedSize = CharUnits::fromQuantity(16);
6278 assert((UnpaddedSize <= PaddedSize) && "Invalid argument size.");
6280 CharUnits Padding = (PaddedSize - UnpaddedSize);
6282 llvm::Type *IndexTy = CGF.Int64Ty;
6283 llvm::Value *PaddedSizeV =
6284 llvm::ConstantInt::get(IndexTy, PaddedSize.getQuantity());
6286 if (IsVector) {
6287 // Work out the address of a vector argument on the stack.
6288 // Vector arguments are always passed in the high bits of a
6289 // single (8 byte) or double (16 byte) stack slot.
6290 Address OverflowArgAreaPtr =
6291 CGF.Builder.CreateStructGEP(VAListAddr, 2, CharUnits::fromQuantity(16),
6292 "overflow_arg_area_ptr");
6293 Address OverflowArgArea =
6294 Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
6295 TyInfo.second);
6296 Address MemAddr =
6297 CGF.Builder.CreateElementBitCast(OverflowArgArea, DirectTy, "mem_addr");
6299 // Update overflow_arg_area_ptr pointer
6300 llvm::Value *NewOverflowArgArea =
6301 CGF.Builder.CreateGEP(OverflowArgArea.getPointer(), PaddedSizeV,
6302 "overflow_arg_area");
6303 CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
6305 return MemAddr;
6306 }
6308 assert(PaddedSize.getQuantity() == 8);
6310 unsigned MaxRegs, RegCountField, RegSaveIndex;
6311 CharUnits RegPadding;
6312 if (InFPRs) {
6313 MaxRegs = 4; // Maximum of 4 FPR arguments
6314 RegCountField = 1; // __fpr
6315 RegSaveIndex = 16; // save offset for f0
6316 RegPadding = CharUnits(); // floats are passed in the high bits of an FPR
6317 } else {
6318 MaxRegs = 5; // Maximum of 5 GPR arguments
6319 RegCountField = 0; // __gpr
6320 RegSaveIndex = 2; // save offset for r2
6321 RegPadding = Padding; // values are passed in the low bits of a GPR
6322 }
6324 Address RegCountPtr = CGF.Builder.CreateStructGEP(
6325 VAListAddr, RegCountField, RegCountField * CharUnits::fromQuantity(8),
6326 "reg_count_ptr");
6327 llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count");
6328 llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
6329 llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV,
6330 "fits_in_regs");
6332 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
6333 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
6334 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
6335 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
6337 // Emit code to load the value if it was passed in registers.
6338 CGF.EmitBlock(InRegBlock);
6340 // Work out the address of an argument register.
6341 llvm::Value *ScaledRegCount =
6342 CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count");
6343 llvm::Value *RegBase =
6344 llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize.getQuantity()
6345 + RegPadding.getQuantity());
6346 llvm::Value *RegOffset =
6347 CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset");
6348 Address RegSaveAreaPtr =
6349 CGF.Builder.CreateStructGEP(VAListAddr, 3, CharUnits::fromQuantity(24),
6350 "reg_save_area_ptr");
6351 llvm::Value *RegSaveArea =
6352 CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area");
6353 Address RawRegAddr(CGF.Builder.CreateGEP(RegSaveArea, RegOffset,
6354 "raw_reg_addr"),
6355 PaddedSize);
6356 Address RegAddr =
6357 CGF.Builder.CreateElementBitCast(RawRegAddr, DirectTy, "reg_addr");
6359 // Update the register count
6360 llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1);
6361 llvm::Value *NewRegCount =
6362 CGF.Builder.CreateAdd(RegCount, One, "reg_count");
6363 CGF.Builder.CreateStore(NewRegCount, RegCountPtr);
6364 CGF.EmitBranch(ContBlock);
6366 // Emit code to load the value if it was passed in memory.
6367 CGF.EmitBlock(InMemBlock);
6369 // Work out the address of a stack argument.
6370 Address OverflowArgAreaPtr = CGF.Builder.CreateStructGEP(
6371 VAListAddr, 2, CharUnits::fromQuantity(16), "overflow_arg_area_ptr");
6372 Address OverflowArgArea =
6373 Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
6374 PaddedSize);
6375 Address RawMemAddr =
6376 CGF.Builder.CreateConstByteGEP(OverflowArgArea, Padding, "raw_mem_addr");
6377 Address MemAddr =
6378 CGF.Builder.CreateElementBitCast(RawMemAddr, DirectTy, "mem_addr");
6380 // Update overflow_arg_area_ptr pointer
6381 llvm::Value *NewOverflowArgArea =
6382 CGF.Builder.CreateGEP(OverflowArgArea.getPointer(), PaddedSizeV,
6383 "overflow_arg_area");
6384 CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
6385 CGF.EmitBranch(ContBlock);
6387 // Return the appropriate result.
6388 CGF.EmitBlock(ContBlock);
6389 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock,
6390 MemAddr, InMemBlock, "va_arg.addr");
6392 if (IsIndirect)
6393 ResAddr = Address(CGF.Builder.CreateLoad(ResAddr, "indirect_arg"),
6394 TyInfo.second);
6396 return ResAddr;
6397 }
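// Worked example (a sketch): a 4-byte int passed in GPRs has PaddedSize 8
// and RegPadding 4, so with reg_count == 0 it is loaded from
// reg_save_area + 2*8 + 4 (the low half of r2's save slot); once reg_count
// reaches 5 it comes from overflow_arg_area + 4 instead.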
6399 ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
6400 if (RetTy->isVoidType())
6401 return ABIArgInfo::getIgnore();
6402 if (isVectorArgumentType(RetTy))
6403 return ABIArgInfo::getDirect();
6404 if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64)
6405 return getNaturalAlignIndirect(RetTy);
6406 return (isPromotableIntegerType(RetTy) ?
6407 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
6408 }
6410 ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
6411 // Handle the generic C++ ABI.
6412 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
6413 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
6415 // Integers and enums are extended to full register width.
6416 if (isPromotableIntegerType(Ty))
6417 return ABIArgInfo::getExtend();
6419 // Handle vector types and vector-like structure types. Note that
6420 // as opposed to float-like structure types, we do not allow any
6421 // padding for vector-like structures, so verify the sizes match.
6422 uint64_t Size = getContext().getTypeSize(Ty);
6423 QualType SingleElementTy = GetSingleElementType(Ty);
6424 if (isVectorArgumentType(SingleElementTy) &&
6425 getContext().getTypeSize(SingleElementTy) == Size)
6426 return ABIArgInfo::getDirect(CGT.ConvertType(SingleElementTy));
6428 // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly.
6429 if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
6430 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
6432 // Handle small structures.
6433 if (const RecordType *RT = Ty->getAs<RecordType>()) {
6434 // Structures with flexible arrays have variable length, so really
6435 // fail the size test above.
6436 const RecordDecl *RD = RT->getDecl();
6437 if (RD->hasFlexibleArrayMember())
6438 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
6440 // The structure is passed as an unextended integer, a float, or a double.
6441 llvm::Type *PassTy;
6442 if (isFPArgumentType(SingleElementTy)) {
6443 assert(Size == 32 || Size == 64);
6444 if (Size == 32)
6445 PassTy = llvm::Type::getFloatTy(getVMContext());
6446 else
6447 PassTy = llvm::Type::getDoubleTy(getVMContext());
6448 } else
6449 PassTy = llvm::IntegerType::get(getVMContext(), Size);
6450 return ABIArgInfo::getDirect(PassTy);
6451 }
6453 // Non-structure compounds are passed indirectly.
6454 if (isCompoundType(Ty))
6455 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
6457 return ABIArgInfo::getDirect(nullptr);
6458 }
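// Examples of these rules (a sketch): "struct { float f; }" is passed as
// float, "struct { short a, b; }" as i32, and "struct { char c[3]; }"
// (3 bytes, not a power-of-2 size) is passed indirectly.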
6460 //===----------------------------------------------------------------------===//
6461 // MSP430 ABI Implementation
6462 //===----------------------------------------------------------------------===//
6466 class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
6467 public:
6468 MSP430TargetCodeGenInfo(CodeGenTypes &CGT)
6469 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
6470 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
6471 CodeGen::CodeGenModule &M) const override;
6472 };
6476 void MSP430TargetCodeGenInfo::setTargetAttributes(const Decl *D,
6477 llvm::GlobalValue *GV,
6478 CodeGen::CodeGenModule &M) const {
6479 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
6480 if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) {
6481 // Handle 'interrupt' attribute:
6482 llvm::Function *F = cast<llvm::Function>(GV);
6484 // Step 1: Set ISR calling convention.
6485 F->setCallingConv(llvm::CallingConv::MSP430_INTR);
6487 // Step 2: Add attributes goodness.
6488 F->addFnAttr(llvm::Attribute::NoInline);
6490 // Step 3: Emit ISR vector alias.
6491 unsigned Num = attr->getNumber() / 2;
6492 llvm::GlobalAlias::create(llvm::Function::ExternalLinkage,
6493 "__isr_" + Twine(Num), F);
6494 }
6495 }
6496 }
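// Usage sketch (illustrative source, not part of this file): a handler
// declared as
//   void __attribute__((interrupt(4))) isr(void);
// gets the MSP430_INTR calling convention, is marked noinline, and receives
// an alias named __isr_2 (the vector number 4 divided by 2).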
6498 //===----------------------------------------------------------------------===//
6499 // MIPS ABI Implementation. This works for both little-endian and
6500 // big-endian variants.
6501 //===----------------------------------------------------------------------===//
6504 class MipsABIInfo : public ABIInfo {
6505 bool IsO32;
6506 unsigned MinABIStackAlignInBytes, StackAlignInBytes;
6507 void CoerceToIntArgs(uint64_t TySize,
6508 SmallVectorImpl<llvm::Type *> &ArgList) const;
6509 llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const;
6510 llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const;
6511 llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const;
6512 public:
6513 MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) :
6514 ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
6515 StackAlignInBytes(IsO32 ? 8 : 16) {}
6517 ABIArgInfo classifyReturnType(QualType RetTy) const;
6518 ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const;
6519 void computeInfo(CGFunctionInfo &FI) const override;
6520 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6521 QualType Ty) const override;
6522 bool shouldSignExtUnsignedType(QualType Ty) const override;
6523 };
6525 class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
6526 unsigned SizeOfUnwindException;
6527 public:
6528 MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32)
6529 : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)),
6530 SizeOfUnwindException(IsO32 ? 24 : 32) {}
6532 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
6533 return 29;
6534 }
6536 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
6537 CodeGen::CodeGenModule &CGM) const override {
6538 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
6539 if (!FD) return;
6540 llvm::Function *Fn = cast<llvm::Function>(GV);
6541 if (FD->hasAttr<Mips16Attr>()) {
6542 Fn->addFnAttr("mips16");
6543 }
6544 else if (FD->hasAttr<NoMips16Attr>()) {
6545 Fn->addFnAttr("nomips16");
6546 }
6548 const MipsInterruptAttr *Attr = FD->getAttr<MipsInterruptAttr>();
6549 if (!Attr)
6550 return;
6552 const char *Kind;
6553 switch (Attr->getInterrupt()) {
6554 case MipsInterruptAttr::eic: Kind = "eic"; break;
6555 case MipsInterruptAttr::sw0: Kind = "sw0"; break;
6556 case MipsInterruptAttr::sw1: Kind = "sw1"; break;
6557 case MipsInterruptAttr::hw0: Kind = "hw0"; break;
6558 case MipsInterruptAttr::hw1: Kind = "hw1"; break;
6559 case MipsInterruptAttr::hw2: Kind = "hw2"; break;
6560 case MipsInterruptAttr::hw3: Kind = "hw3"; break;
6561 case MipsInterruptAttr::hw4: Kind = "hw4"; break;
6562 case MipsInterruptAttr::hw5: Kind = "hw5"; break;
6563 }
6565 Fn->addFnAttr("interrupt", Kind);
6566 }
6569 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
6570 llvm::Value *Address) const override;
6572 unsigned getSizeOfUnwindException() const override {
6573 return SizeOfUnwindException;
6578 void MipsABIInfo::CoerceToIntArgs(
6579 uint64_t TySize, SmallVectorImpl<llvm::Type *> &ArgList) const {
6580 llvm::IntegerType *IntTy =
6581 llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);
6583 // Add (TySize / MinABIStackAlignInBytes) args of IntTy.
6584 for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
6585 ArgList.push_back(IntTy);
6587 // If necessary, add one more integer type to ArgList.
6588 unsigned R = TySize % (MinABIStackAlignInBytes * 8);
6590 if (R)
6591 ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
6592 }
6594 // In N32/64, an aligned double precision floating point field is passed in
6595 // a register.
6596 llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const {
6597 SmallVector<llvm::Type*, 8> ArgList, IntArgList;
6599 if (IsO32) {
6600 CoerceToIntArgs(TySize, ArgList);
6601 return llvm::StructType::get(getVMContext(), ArgList);
6602 }
6604 if (Ty->isComplexType())
6605 return CGT.ConvertType(Ty);
6607 const RecordType *RT = Ty->getAs<RecordType>();
6609 // Unions/vectors are passed in integer registers.
6610 if (!RT || !RT->isStructureOrClassType()) {
6611 CoerceToIntArgs(TySize, ArgList);
6612 return llvm::StructType::get(getVMContext(), ArgList);
6615 const RecordDecl *RD = RT->getDecl();
6616 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
6617 assert(!(TySize % 8) && "Size of structure must be multiple of 8.");
6619 uint64_t LastOffset = 0;
6620 unsigned idx = 0;
6621 llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64);
6623 // Iterate over fields in the struct/class and check if there are any aligned
6624 // double fields.
6625 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
6626 i != e; ++i, ++idx) {
6627 const QualType Ty = i->getType();
6628 const BuiltinType *BT = Ty->getAs<BuiltinType>();
6630 if (!BT || BT->getKind() != BuiltinType::Double)
6631 continue;
6633 uint64_t Offset = Layout.getFieldOffset(idx);
6634 if (Offset % 64) // Ignore doubles that are not aligned.
6635 continue;
6637 // Add ((Offset - LastOffset) / 64) args of type i64.
6638 for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
6639 ArgList.push_back(I64);
6641 // Add double type.
6642 ArgList.push_back(llvm::Type::getDoubleTy(getVMContext()));
6643 LastOffset = Offset + 64;
6644 }
6646 CoerceToIntArgs(TySize - LastOffset, IntArgList);
6647 ArgList.append(IntArgList.begin(), IntArgList.end());
6649 return llvm::StructType::get(getVMContext(), ArgList);
6650 }
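// Worked example (a sketch): on N64, "struct { double d; int i; }" (128
// bits) is coerced to { double, i64 }, keeping the aligned double eligible
// for an FPR while the trailing int travels as a GPR-sized integer.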
6652 llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset,
6653 uint64_t Offset) const {
6654 if (OrigOffset + MinABIStackAlignInBytes > Offset)
6655 return nullptr;
6657 return llvm::IntegerType::get(getVMContext(), (Offset - OrigOffset) * 8);
6658 }
6660 ABIArgInfo
6661 MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
6662 Ty = useFirstFieldIfTransparentUnion(Ty);
6664 uint64_t OrigOffset = Offset;
6665 uint64_t TySize = getContext().getTypeSize(Ty);
6666 uint64_t Align = getContext().getTypeAlign(Ty) / 8;
6668 Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes),
6669 (uint64_t)StackAlignInBytes);
6670 unsigned CurrOffset = llvm::alignTo(Offset, Align);
6671 Offset = CurrOffset + llvm::alignTo(TySize, Align * 8) / 8;
6673 if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) {
6674 // Ignore empty aggregates.
6675 if (TySize == 0)
6676 return ABIArgInfo::getIgnore();
6678 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
6679 Offset = OrigOffset + MinABIStackAlignInBytes;
6680 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
6681 }
6683 // If we have reached here, aggregates are passed directly by coercing to
6684 // another structure type. Padding is inserted if the offset of the
6685 // aggregate is unaligned.
6686 ABIArgInfo ArgInfo =
6687 ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0,
6688 getPaddingType(OrigOffset, CurrOffset));
6689 ArgInfo.setInReg(true);
6690 return ArgInfo;
6691 }
6693 // Treat an enum type as its underlying type.
6694 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
6695 Ty = EnumTy->getDecl()->getIntegerType();
6697 // All integral types are promoted to the GPR width.
6698 if (Ty->isIntegralOrEnumerationType())
6699 return ABIArgInfo::getExtend();
6701 return ABIArgInfo::getDirect(
6702 nullptr, 0, IsO32 ? nullptr : getPaddingType(OrigOffset, CurrOffset));
6705 llvm::Type*
6706 MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
6707 const RecordType *RT = RetTy->getAs<RecordType>();
6708 SmallVector<llvm::Type*, 8> RTList;
6710 if (RT && RT->isStructureOrClassType()) {
6711 const RecordDecl *RD = RT->getDecl();
6712 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
6713 unsigned FieldCnt = Layout.getFieldCount();
6715 // N32/64 returns struct/classes in floating point registers if the
6716 // following conditions are met:
6717 // 1. The size of the struct/class is no larger than 128-bit.
6718 // 2. The struct/class has one or two fields all of which are floating
6719 // point types.
6720 // 3. The offset of the first field is zero (this follows what gcc does).
6722 // Any other composite results are returned in integer registers.
6724 if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) {
6725 RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end();
6726 for (; b != e; ++b) {
6727 const BuiltinType *BT = b->getType()->getAs<BuiltinType>();
6729 if (!BT || !BT->isFloatingPoint())
6730 break;
6732 RTList.push_back(CGT.ConvertType(b->getType()));
6733 }
6735 if (b == e)
6736 return llvm::StructType::get(getVMContext(), RTList,
6737 RD->hasAttr<PackedAttr>());
6739 RTList.clear();
6740 }
6741 }
6743 CoerceToIntArgs(Size, RTList);
6744 return llvm::StructType::get(getVMContext(), RTList);
6745 }
6747 ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
6748 uint64_t Size = getContext().getTypeSize(RetTy);
6750 if (RetTy->isVoidType())
6751 return ABIArgInfo::getIgnore();
6753 // O32 doesn't treat zero-sized structs differently from other structs.
6754 // However, N32/N64 ignores zero sized return values.
6755 if (!IsO32 && Size == 0)
6756 return ABIArgInfo::getIgnore();
6758 if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) {
6759 if (Size <= 128) {
6760 if (RetTy->isAnyComplexType())
6761 return ABIArgInfo::getDirect();
6763 // O32 returns integer vectors in registers and N32/N64 returns all small
6764 // aggregates in registers.
6765 if (!IsO32 ||
6766 (RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())) {
6767 ABIArgInfo ArgInfo =
6768 ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
6769 ArgInfo.setInReg(true);
6770 return ArgInfo;
6771 }
6772 }
6774 return getNaturalAlignIndirect(RetTy);
6775 }
6777 // Treat an enum type as its underlying type.
6778 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
6779 RetTy = EnumTy->getDecl()->getIntegerType();
6781 return (RetTy->isPromotableIntegerType() ?
6782 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
6783 }
6785 void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
6786 ABIArgInfo &RetInfo = FI.getReturnInfo();
6787 if (!getCXXABI().classifyReturnType(FI))
6788 RetInfo = classifyReturnType(FI.getReturnType());
6790 // Check if a pointer to an aggregate is passed as a hidden argument.
6791 uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0;
6793 for (auto &I : FI.arguments())
6794 I.info = classifyArgumentType(I.type, Offset);
6795 }
6797 Address MipsABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6798 QualType OrigTy) const {
6799 QualType Ty = OrigTy;
6801 // Integer arguments are promoted to 32-bit on O32 and 64-bit on N32/N64.
6802 // Pointers are also promoted in the same way but this only matters for N32.
  unsigned SlotSizeInBits = IsO32 ? 32 : 64;
  unsigned PtrWidth = getTarget().getPointerWidth(0);
  bool DidPromote = false;
  if ((Ty->isIntegerType() &&
       getContext().getIntWidth(Ty) < SlotSizeInBits) ||
      (Ty->isPointerType() && PtrWidth < SlotSizeInBits)) {
    DidPromote = true;
    Ty = getContext().getIntTypeForBitwidth(SlotSizeInBits,
                                            Ty->isSignedIntegerType());
  }
  auto TyInfo = getContext().getTypeInfoInChars(Ty);

  // The alignment of things in the argument area is never larger than
  // StackAlignInBytes.
  TyInfo.second =
      std::min(TyInfo.second, CharUnits::fromQuantity(StackAlignInBytes));

  // MinABIStackAlignInBytes is the size of argument slots on the stack.
  CharUnits ArgSlotSize = CharUnits::fromQuantity(MinABIStackAlignInBytes);

  Address Addr = emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
                                  TyInfo, ArgSlotSize, /*AllowHigherAlign*/ true);
  // If there was a promotion, "unpromote" into a temporary.
  // TODO: can we just use a pointer into a subset of the original slot?
  if (DidPromote) {
    Address Temp = CGF.CreateMemTemp(OrigTy, "vaarg.promotion-temp");
    llvm::Value *Promoted = CGF.Builder.CreateLoad(Addr);

    // Truncate down to the right width.
    llvm::Type *IntTy = (OrigTy->isIntegerType() ? Temp.getElementType()
                                                 : CGF.IntPtrTy);
    llvm::Value *V = CGF.Builder.CreateTrunc(Promoted, IntTy);
    if (OrigTy->isPointerType())
      V = CGF.Builder.CreateIntToPtr(V, Temp.getElementType());

    CGF.Builder.CreateStore(V, Temp);
    Addr = Temp;
  }

  return Addr;
}
bool MipsABIInfo::shouldSignExtUnsignedType(QualType Ty) const {
  int TySize = getContext().getTypeSize(Ty);

  // MIPS64 ABI requires unsigned 32 bit integers to be sign extended.
  if (Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
    return true;

  return false;
}
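// Illustrative example (not from the ABI document itself): for an
// 'unsigned int' holding 0x80000000, a MIPS64 callee expects the 64-bit
// register to contain 0xFFFFFFFF80000000, i.e. the value sign-extended even
// though the C type is unsigned.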
bool
MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                               llvm::Value *Address) const {
  // This information comes from gcc's implementation, which seems to be
  // as canonical as it gets.
  // Everything on MIPS is 4 bytes. Double-precision FP registers
  // are aliased to pairs of single-precision FP registers.
  llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

  // 0-31 are the general purpose registers, $0 - $31.
  // 32-63 are the floating-point registers, $f0 - $f31.
  // 64 and 65 are the multiply/divide registers, $hi and $lo.
  // 66 is the (notional, I think) register for signal-handler return.
  AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);

  // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
  // They are one bit wide and ignored here.

  // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
  // (coprocessor 1 is the FP unit)
  // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
  // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
  // 176-181 are the DSP accumulator registers.
  AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);

  return false;
}
//===----------------------------------------------------------------------===//
// TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
// Currently subclassed only to implement custom OpenCL C function attribute
// handling.
//===----------------------------------------------------------------------===//
namespace {

class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo {
public:
  TCETargetCodeGenInfo(CodeGenTypes &CGT)
    : DefaultTargetCodeGenInfo(CGT) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
};
void TCETargetCodeGenInfo::setTargetAttributes(
    const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
  if (!FD) return;

  llvm::Function *F = cast<llvm::Function>(GV);

  if (M.getLangOpts().OpenCL) {
    if (FD->hasAttr<OpenCLKernelAttr>()) {
      // OpenCL C Kernel functions are not subject to inlining
      F->addFnAttr(llvm::Attribute::NoInline);
      const ReqdWorkGroupSizeAttr *Attr = FD->getAttr<ReqdWorkGroupSizeAttr>();
      if (Attr) {
        // Convert the reqd_work_group_size() attributes to metadata.
        llvm::LLVMContext &Context = F->getContext();
        llvm::NamedMDNode *OpenCLMetadata =
            M.getModule().getOrInsertNamedMetadata(
                "opencl.kernel_wg_size_info");

        SmallVector<llvm::Metadata *, 5> Operands;
        Operands.push_back(llvm::ConstantAsMetadata::get(F));

        Operands.push_back(
            llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
                M.Int32Ty, llvm::APInt(32, Attr->getXDim()))));
        Operands.push_back(
            llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
                M.Int32Ty, llvm::APInt(32, Attr->getYDim()))));
        Operands.push_back(
            llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
                M.Int32Ty, llvm::APInt(32, Attr->getZDim()))));

        // Add a boolean constant operand for "required" (true) or "hint"
        // (false) for implementing the work_group_size_hint attr later.
        // Currently always true as the hint is not yet implemented.
        Operands.push_back(
            llvm::ConstantAsMetadata::get(llvm::ConstantInt::getTrue(Context)));
        OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
      }
    }
  }
}

}
//===----------------------------------------------------------------------===//
// Hexagon ABI Implementation
//===----------------------------------------------------------------------===//
namespace {

class HexagonABIInfo : public ABIInfo {
public:
  HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

private:
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  void computeInfo(CGFunctionInfo &FI) const override;

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
};

class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  HexagonTargetCodeGenInfo(CodeGenTypes &CGT)
    :TargetCodeGenInfo(new HexagonABIInfo(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 29;
  }
};

} // end anonymous namespace
void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type);
}
ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const {
  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }
  // Ignore empty records.
  if (isEmptyRecord(getContext(), Ty, true))
    return ABIArgInfo::getIgnore();

  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
    return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

  uint64_t Size = getContext().getTypeSize(Ty);
  if (Size > 64)
    return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
    // Pass in the smallest viable integer type.
  else if (Size > 32)
    return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
  else if (Size > 16)
    return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
  else if (Size > 8)
    return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
  else
    return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
}
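// Worked example (a sketch): 'struct { char C[6]; }' has Size == 48, so it is
// passed directly as an i64; a 12-byte struct exceeds 64 bits and is passed
// indirectly (byval) via getNaturalAlignIndirect above.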
ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // Large vector types should be returned via memory.
  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64)
    return getNaturalAlignIndirect(RetTy);

  if (!isAggregateTypeForABI(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    return (RetTy->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }
  if (isEmptyRecord(getContext(), RetTy, true))
    return ABIArgInfo::getIgnore();

  // Aggregates <= 8 bytes are returned in r0; other aggregates
  // are returned indirectly.
  uint64_t Size = getContext().getTypeSize(RetTy);
  if (Size <= 64) {
    // Return in the smallest viable integer type.
    if (Size <= 8)
      return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
    if (Size <= 16)
      return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
    if (Size <= 32)
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
    return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
  }

  return getNaturalAlignIndirect(RetTy, /*ByVal=*/true);
}
Address HexagonABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                  QualType Ty) const {
  // FIXME: Someone needs to audit that this handles alignment correctly.
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
                          getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(4),
                          /*AllowHigherAlign*/ true);
}
//===----------------------------------------------------------------------===//
// Lanai ABI Implementation
//===----------------------------------------------------------------------===//
namespace {
class LanaiABIInfo : public DefaultABIInfo {
public:
  LanaiABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}

  bool shouldUseInReg(QualType Ty, CCState &State) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    CCState State(FI.getCallingConvention());
    // Lanai uses 4 registers to pass arguments unless the function has the
    // regparm attribute set.
    if (FI.getHasRegParm()) {
      State.FreeRegs = FI.getRegParm();
    } else {
      State.FreeRegs = 4;
    }

    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type, State);
  }

  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const;
};
} // end anonymous namespace
bool LanaiABIInfo::shouldUseInReg(QualType Ty, CCState &State) const {
  unsigned Size = getContext().getTypeSize(Ty);
  unsigned SizeInRegs = llvm::alignTo(Size, 32U) / 32U;

  if (SizeInRegs == 0)
    return false;

  if (SizeInRegs > State.FreeRegs) {
    State.FreeRegs = 0;
    return false;
  }

  State.FreeRegs -= SizeInRegs;

  return true;
}
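// Worked example (assumed values): with the default State.FreeRegs == 4, a
// 64-bit argument has SizeInRegs == 2 and leaves two registers behind; a
// following 128-bit argument needs SizeInRegs == 4 > 2, so FreeRegs is
// cleared and it is not passed in registers.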
ABIArgInfo LanaiABIInfo::getIndirectResult(QualType Ty, bool ByVal,
                                           CCState &State) const {
  if (!ByVal) {
    if (State.FreeRegs) {
      --State.FreeRegs; // Non-byval indirects just use one pointer.
      return getNaturalAlignIndirectInReg(Ty);
    }
    return getNaturalAlignIndirect(Ty, false);
  }

  // Compute the byval alignment.
  const unsigned MinABIStackAlignInBytes = 4;
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true,
                                 /*Realign=*/TypeAlign >
                                     MinABIStackAlignInBytes);
}
ABIArgInfo LanaiABIInfo::classifyArgumentType(QualType Ty,
                                              CCState &State) const {
  // Check with the C++ ABI first.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (RT) {
    CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
    if (RAA == CGCXXABI::RAA_Indirect) {
      return getIndirectResult(Ty, /*ByVal=*/false, State);
    } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
      return getNaturalAlignIndirect(Ty, /*ByRef=*/true);
    }
  }
  if (isAggregateTypeForABI(Ty)) {
    // Structures with flexible arrays are always indirect.
    if (RT && RT->getDecl()->hasFlexibleArrayMember())
      return getIndirectResult(Ty, /*ByVal=*/true, State);

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();

    llvm::LLVMContext &LLVMContext = getVMContext();
    unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
    if (SizeInRegs <= State.FreeRegs) {
      llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
      SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32);
      llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
      State.FreeRegs -= SizeInRegs;
      return ABIArgInfo::getDirectInReg(Result);
    } else {
      State.FreeRegs = 0;
    }
    return getIndirectResult(Ty, true, State);
  }
  // Treat an enum type as its underlying type.
  if (const auto *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  bool InReg = shouldUseInReg(Ty, State);
  if (Ty->isPromotableIntegerType()) {
    if (InReg)
      return ABIArgInfo::getDirectInReg();
    return ABIArgInfo::getExtend();
  }
  if (InReg)
    return ABIArgInfo::getDirectInReg();
  return ABIArgInfo::getDirect();
}
namespace {
class LanaiTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  LanaiTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(new LanaiABIInfo(CGT)) {}
};
} // end anonymous namespace
//===----------------------------------------------------------------------===//
// AMDGPU ABI Implementation
//===----------------------------------------------------------------------===//
namespace {

class AMDGPUABIInfo final : public DefaultABIInfo {
public:
  explicit AMDGPUABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}

private:
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override;
};
void AMDGPUABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

  unsigned CC = FI.getCallingConvention();
  for (auto &Arg : FI.arguments())
    if (CC == llvm::CallingConv::AMDGPU_KERNEL)
      Arg.info = classifyArgumentType(Arg.type);
    else
      Arg.info = DefaultABIInfo::classifyArgumentType(Arg.type);
}
/// \brief Classify argument of given type \p Ty.
ABIArgInfo AMDGPUABIInfo::classifyArgumentType(QualType Ty) const {
  llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty));
  if (!StrTy) {
    return DefaultABIInfo::classifyArgumentType(Ty);
  }

  // Coerce single element structs to their element.
  if (StrTy->getNumElements() == 1) {
    return ABIArgInfo::getDirect();
  }

  // If we set CanBeFlattened to true, CodeGen will expand the struct to its
  // individual elements, which confuses the Clover OpenCL backend; therefore we
  // have to set it to false here. Other args of getDirect() are just defaults.
  return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
}
class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new AMDGPUABIInfo(CGT)) {}
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
  unsigned getOpenCLKernelCallingConv() const override;

  llvm::Constant *getNullPointer(const CodeGen::CodeGenModule &CGM,
      llvm::PointerType *T, QualType QT) const override;
};

}

static void appendOpenCLVersionMD (CodeGen::CodeGenModule &CGM);
void AMDGPUTargetCodeGenInfo::setTargetAttributes(
    const Decl *D,
    llvm::GlobalValue *GV,
    CodeGen::CodeGenModule &M) const {
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
  if (!FD)
    return;

  llvm::Function *F = cast<llvm::Function>(GV);
  if (const auto *Attr = FD->getAttr<AMDGPUFlatWorkGroupSizeAttr>()) {
    unsigned Min = Attr->getMin();
    unsigned Max = Attr->getMax();

    if (Min != 0) {
      assert(Min <= Max && "Min must be less than or equal Max");

      std::string AttrVal = llvm::utostr(Min) + "," + llvm::utostr(Max);
      F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);
    } else
      assert(Max == 0 && "Max must be zero");
  }
  if (const auto *Attr = FD->getAttr<AMDGPUWavesPerEUAttr>()) {
    unsigned Min = Attr->getMin();
    unsigned Max = Attr->getMax();

    if (Min != 0) {
      assert((Max == 0 || Min <= Max) && "Min must be less than or equal Max");

      std::string AttrVal = llvm::utostr(Min);
      if (Max != 0)
        AttrVal = AttrVal + "," + llvm::utostr(Max);
      F->addFnAttr("amdgpu-waves-per-eu", AttrVal);
    } else
      assert(Max == 0 && "Max must be zero");
  }
  if (const auto *Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) {
    unsigned NumSGPR = Attr->getNumSGPR();

    if (NumSGPR != 0)
      F->addFnAttr("amdgpu-num-sgpr", llvm::utostr(NumSGPR));
  }

  if (const auto *Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) {
    uint32_t NumVGPR = Attr->getNumVGPR();

    if (NumVGPR != 0)
      F->addFnAttr("amdgpu-num-vgpr", llvm::utostr(NumVGPR));
  }

  appendOpenCLVersionMD(M);
}
unsigned AMDGPUTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
  return llvm::CallingConv::AMDGPU_KERNEL;
}
// Currently LLVM assumes null pointers always have value 0,
// which results in incorrectly transformed IR. Therefore, instead of
// emitting null pointers in private and local address spaces, a null
// pointer in generic address space is emitted which is cast to a
// pointer in local or private address space.
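// Sketch of the emitted constant (the address-space numbers are illustrative
// assumptions; the real values come from the target's address space map):
//
//   addrspacecast (i32 addrspace(4)* null to i32 addrspace(3)*)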
llvm::Constant *AMDGPUTargetCodeGenInfo::getNullPointer(
    const CodeGen::CodeGenModule &CGM, llvm::PointerType *PT,
    QualType QT) const {
  if (CGM.getContext().getTargetNullPointerValue(QT) == 0)
    return llvm::ConstantPointerNull::get(PT);

  auto &Ctx = CGM.getContext();
  auto NPT = llvm::PointerType::get(PT->getElementType(),
      Ctx.getTargetAddressSpace(LangAS::opencl_generic));
  return llvm::ConstantExpr::getAddrSpaceCast(
      llvm::ConstantPointerNull::get(NPT), PT);
}
//===----------------------------------------------------------------------===//
// SPARC v8 ABI Implementation.
// Based on the SPARC Compliance Definition version 2.4.1.
//
// Ensures that complex values are passed in registers.
//
namespace {
class SparcV8ABIInfo : public DefaultABIInfo {
public:
  SparcV8ABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}

private:
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  void computeInfo(CGFunctionInfo &FI) const override;
};
} // end anonymous namespace
ABIArgInfo
SparcV8ABIInfo::classifyReturnType(QualType Ty) const {
  if (Ty->isAnyComplexType()) {
    return ABIArgInfo::getDirect();
  }
  else {
    return DefaultABIInfo::classifyReturnType(Ty);
  }
}
void SparcV8ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  for (auto &Arg : FI.arguments())
    Arg.info = classifyArgumentType(Arg.type);
}

namespace {
class SparcV8TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SparcV8TargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new SparcV8ABIInfo(CGT)) {}
};
} // end anonymous namespace
//===----------------------------------------------------------------------===//
// SPARC v9 ABI Implementation.
// Based on the SPARC Compliance Definition version 2.4.1.
//
// Function arguments are mapped to a nominal "parameter array" and promoted to
// registers depending on their type. Each argument occupies 8 or 16 bytes in
// the array, structs larger than 16 bytes are passed indirectly.
//
// One case requires special care:
//
//   struct mixed {
//     int i;
//     float f;
//   };
//
// When a struct mixed is passed by value, it only occupies 8 bytes in the
// parameter array, but the int is passed in an integer register, and the float
// is passed in a floating point register. This is represented as two arguments
// with the LLVM IR inreg attribute:
//
//   declare void f(i32 inreg %i, float inreg %f)
//
// The code generator will only allocate 4 bytes from the parameter array for
// the inreg arguments. All other arguments are allocated a multiple of 8
// bytes.
//
namespace {
class SparcV9ABIInfo : public ABIInfo {
public:
  SparcV9ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

private:
  ABIArgInfo classifyType(QualType RetTy, unsigned SizeLimit) const;
  void computeInfo(CGFunctionInfo &FI) const override;
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
  // Coercion type builder for structs passed in registers. The coercion type
  // serves two purposes:
  //
  // 1. Pad structs to a multiple of 64 bits, so they are passed 'left-aligned'
  //    in registers.
  // 2. Expose aligned floating point elements as first-level elements, so the
  //    code generator knows to pass them in floating point registers.
  //
  // We also compute the InReg flag which indicates that the struct contains
  // aligned 32-bit floats.
  //
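  // Worked example (a sketch): for 'struct S { int I; float F; }' the builder
  // skips the i32 (integer fields are covered by padding), then addFloat() at
  // bit offset 32 pads with an i32 before appending the float. The result is
  // { i32, float } with InReg set, because a 32-bit float is present.
  //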
  struct CoerceBuilder {
    llvm::LLVMContext &Context;
    const llvm::DataLayout &DL;
    SmallVector<llvm::Type*, 8> Elems;
    uint64_t Size;
    bool InReg;

    CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl)
      : Context(c), DL(dl), Size(0), InReg(false) {}
    // Pad Elems with integers until Size is ToSize.
    void pad(uint64_t ToSize) {
      assert(ToSize >= Size && "Cannot remove elements");
      if (ToSize == Size)
        return;

      // Finish the current 64-bit word.
      uint64_t Aligned = llvm::alignTo(Size, 64);
      if (Aligned > Size && Aligned <= ToSize) {
        Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size));
        Size = Aligned;
      }

      // Add whole 64-bit words.
      while (Size + 64 <= ToSize) {
        Elems.push_back(llvm::Type::getInt64Ty(Context));
        Size += 64;
      }

      // Final in-word padding.
      if (Size < ToSize) {
        Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size));
        Size = ToSize;
      }
    }
    // Add a floating point element at Offset.
    void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) {
      // Unaligned floats are treated as integers.
      if (Offset % Bits)
        return;
      // The InReg flag is only required if there are any floats < 64 bits.
      if (Bits < 64)
        InReg = true;
      pad(Offset);
      Elems.push_back(Ty);
      Size = Offset + Bits;
    }
    // Add a struct type to the coercion type, starting at Offset (in bits).
    void addStruct(uint64_t Offset, llvm::StructType *StrTy) {
      const llvm::StructLayout *Layout = DL.getStructLayout(StrTy);
      for (unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) {
        llvm::Type *ElemTy = StrTy->getElementType(i);
        uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i);
        switch (ElemTy->getTypeID()) {
        case llvm::Type::StructTyID:
          addStruct(ElemOffset, cast<llvm::StructType>(ElemTy));
          break;
        case llvm::Type::FloatTyID:
          addFloat(ElemOffset, ElemTy, 32);
          break;
        case llvm::Type::DoubleTyID:
          addFloat(ElemOffset, ElemTy, 64);
          break;
        case llvm::Type::FP128TyID:
          addFloat(ElemOffset, ElemTy, 128);
          break;
        case llvm::Type::PointerTyID:
          if (ElemOffset % 64 == 0) {
            pad(ElemOffset);
            Elems.push_back(ElemTy);
            Size += 64;
          }
          break;
        default:
          break;
        }
      }
    }
    // Check if Ty is a usable substitute for the coercion type.
    bool isUsableType(llvm::StructType *Ty) const {
      return llvm::makeArrayRef(Elems) == Ty->elements();
    }

    // Get the coercion type as a literal struct type.
    llvm::Type *getType() const {
      if (Elems.size() == 1)
        return Elems.front();
      else
        return llvm::StructType::get(Context, Elems);
    }
  };
};
} // end anonymous namespace
ABIArgInfo
SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
  if (Ty->isVoidType())
    return ABIArgInfo::getIgnore();

  uint64_t Size = getContext().getTypeSize(Ty);

  // Anything too big to fit in registers is passed with an explicit indirect
  // pointer / sret pointer.
  if (Size > SizeLimit)
    return getNaturalAlignIndirect(Ty, /*ByVal=*/false);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // Integer types smaller than a register are extended.
  if (Size < 64 && Ty->isIntegerType())
    return ABIArgInfo::getExtend();

  // Other non-aggregates go in registers.
  if (!isAggregateTypeForABI(Ty))
    return ABIArgInfo::getDirect();

  // If a C++ object has either a non-trivial copy constructor or a non-trivial
  // destructor, it is passed with an explicit indirect pointer / sret pointer.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
    return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

  // This is a small aggregate type that should be passed in registers.
  // Build a coercion type from the LLVM struct type.
  llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty));
  if (!StrTy)
    return ABIArgInfo::getDirect();
  CoerceBuilder CB(getVMContext(), getDataLayout());
  CB.addStruct(0, StrTy);
  CB.pad(llvm::alignTo(CB.DL.getTypeSizeInBits(StrTy), 64));

  // Try to use the original type for coercion.
  llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType();

  if (CB.InReg)
    return ABIArgInfo::getDirectInReg(CoerceTy);
  else
    return ABIArgInfo::getDirect(CoerceTy);
}
Address SparcV9ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                  QualType Ty) const {
  ABIArgInfo AI = classifyType(Ty, 16 * 8);
  llvm::Type *ArgTy = CGT.ConvertType(Ty);
  if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
    AI.setCoerceToType(ArgTy);

  CharUnits SlotSize = CharUnits::fromQuantity(8);

  CGBuilderTy &Builder = CGF.Builder;
  Address Addr(Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize);
  llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);

  auto TypeInfo = getContext().getTypeInfoInChars(Ty);

  Address ArgAddr = Address::invalid();
  CharUnits Stride;
  switch (AI.getKind()) {
  case ABIArgInfo::Expand:
  case ABIArgInfo::CoerceAndExpand:
  case ABIArgInfo::InAlloca:
    llvm_unreachable("Unsupported ABI kind for va_arg");

  case ABIArgInfo::Extend: {
    Stride = SlotSize;
    CharUnits Offset = SlotSize - TypeInfo.first;
    ArgAddr = Builder.CreateConstInBoundsByteGEP(Addr, Offset, "extend");
    break;
  }

  case ABIArgInfo::Direct: {
    auto AllocSize = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
    Stride = CharUnits::fromQuantity(AllocSize).alignTo(SlotSize);
    ArgAddr = Addr;
    break;
  }

  case ABIArgInfo::Indirect:
    Stride = SlotSize;
    ArgAddr = Builder.CreateElementBitCast(Addr, ArgPtrTy, "indirect");
    ArgAddr = Address(Builder.CreateLoad(ArgAddr, "indirect.arg"),
                      TypeInfo.second);
    break;

  case ABIArgInfo::Ignore:
    return Address(llvm::UndefValue::get(ArgPtrTy), TypeInfo.second);
  }
  // Update VAList.
  llvm::Value *NextPtr =
      Builder.CreateConstInBoundsByteGEP(Addr.getPointer(), Stride, "ap.next");
  Builder.CreateStore(NextPtr, VAListAddr);

  return Builder.CreateBitCast(ArgAddr, ArgPtrTy, "arg.addr");
}
void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8);
  for (auto &I : FI.arguments())
    I.info = classifyType(I.type, 16 * 8);
}
namespace {
class SparcV9TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SparcV9TargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new SparcV9ABIInfo(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 14;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;
};
} // end anonymous namespace
bool
SparcV9TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                                  llvm::Value *Address) const {
  // This is calculated from the LLVM and GCC tables and verified
  // against gcc output. AFAIK all ABIs use the same encoding.

  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  llvm::IntegerType *i8 = CGF.Int8Ty;
  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
  llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);

  // 0-31: the 8-byte general-purpose registers
  AssignToArrayRange(Builder, Address, Eight8, 0, 31);

  // 32-63: f0-31, the 4-byte floating-point registers
  AssignToArrayRange(Builder, Address, Four8, 32, 63);

  // 64-71: the 8-byte special registers
  //   Y = 64, PSR = 65, WIM = 66, TBR = 67,
  //   PC = 68, NPC = 69, FSR = 70, CSR = 71
  AssignToArrayRange(Builder, Address, Eight8, 64, 71);

  // 72-87: d0-15, the 8-byte floating-point registers
  AssignToArrayRange(Builder, Address, Eight8, 72, 87);

  return false;
}
//===----------------------------------------------------------------------===//
// XCore ABI Implementation
//===----------------------------------------------------------------------===//

namespace {
/// A SmallStringEnc instance is used to build up the TypeString by passing
/// it by reference between functions that append to it.
typedef llvm::SmallString<128> SmallStringEnc;
/// TypeStringCache caches the meta encodings of Types.
///
/// The reason for caching TypeStrings is twofold:
/// 1. To cache a type's encoding for later uses;
/// 2. As a means to break recursive member type inclusion.
///
/// A cache Entry can have a Status of:
/// NonRecursive:   The type encoding is not recursive;
/// Recursive:      The type encoding is recursive;
/// Incomplete:     An incomplete TypeString;
/// IncompleteUsed: An incomplete TypeString that has been used in a
///                 Recursive type encoding.
///
/// A NonRecursive entry will have all of its sub-members expanded as fully
/// as possible. Whilst it may contain types which are recursive, the type
/// itself is not recursive and thus its encoding may be safely used whenever
/// the type is encountered.
///
/// A Recursive entry will have all of its sub-members expanded as fully as
/// possible. The type itself is recursive and it may contain other types which
/// are recursive. The Recursive encoding must not be used during the expansion
/// of a recursive type's recursive branch. For simplicity the code uses
/// IncompleteCount to reject all usage of Recursive encodings for member types.
///
/// An Incomplete entry is always a RecordType and only encodes its
/// identifier e.g. "s(S){}". Incomplete 'StubEnc' entries are ephemeral and
/// are placed into the cache during type expansion as a means to identify and
/// handle recursive inclusion of types as sub-members. If there is recursion
/// the entry becomes IncompleteUsed.
///
/// During the expansion of a RecordType's members:
///
///   If the cache contains a NonRecursive encoding for the member type, the
///   cached encoding is used;
///
///   If the cache contains a Recursive encoding for the member type, the
///   cached encoding is 'Swapped' out, as it may be incorrect, and...
///
///   If the member is a RecordType, an Incomplete encoding is placed into the
///   cache to break potential recursive inclusion of itself as a sub-member;
///
///   Once a member RecordType has been expanded, its temporary incomplete
///   entry is removed from the cache. If a Recursive encoding was swapped out
///   it is swapped back in;
///
///   If an incomplete entry is used to expand a sub-member, the incomplete
///   entry is marked as IncompleteUsed. The cache keeps count of how many
///   IncompleteUsed entries it currently contains in IncompleteUsedCount;
///
///   If a member's encoding is found to be a NonRecursive or Recursive viz:
///   IncompleteUsedCount==0, the member's encoding is added to the cache.
///   Else the member is part of a recursive type and thus the recursion has
///   been exited too soon for the encoding to be correct for the member.
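///
/// Illustrative sketch: while expanding 'struct S { struct S *Next; };' an
/// Incomplete stub of the form "s(S){}" is cached first; the member expansion
/// then reuses that stub, marking it IncompleteUsed, and removeIncomplete()
/// consequently reports the type as Recursive.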
class TypeStringCache {
  enum Status {NonRecursive, Recursive, Incomplete, IncompleteUsed};
  struct Entry {
    std::string Str;     // The encoded TypeString for the type.
    enum Status State;   // Information about the encoding in 'Str'.
    std::string Swapped; // A temporary place holder for a Recursive encoding
                         // during the expansion of RecordType's members.
  };
  std::map<const IdentifierInfo *, struct Entry> Map;
  unsigned IncompleteCount;     // Number of Incomplete entries in the Map.
  unsigned IncompleteUsedCount; // Number of IncompleteUsed entries in the Map.
public:
  TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {}
  void addIncomplete(const IdentifierInfo *ID, std::string StubEnc);
  bool removeIncomplete(const IdentifierInfo *ID);
  void addIfComplete(const IdentifierInfo *ID, StringRef Str,
                     bool IsRecursive);
  StringRef lookupStr(const IdentifierInfo *ID);
};
/// TypeString encodings for enum & union fields must be ordered.
/// FieldEncoding is a helper for this ordering process.
class FieldEncoding {
  bool HasName;
  std::string Enc;
public:
  FieldEncoding(bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {}
  StringRef str() { return Enc; }
  bool operator<(const FieldEncoding &rhs) const {
    if (HasName != rhs.HasName) return HasName;
    return Enc < rhs.Enc;
  }
};
class XCoreABIInfo : public DefaultABIInfo {
public:
  XCoreABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
};

class XCoreTargetCodeGenInfo : public TargetCodeGenInfo {
  mutable TypeStringCache TSC;
public:
  XCoreTargetCodeGenInfo(CodeGenTypes &CGT)
    :TargetCodeGenInfo(new XCoreABIInfo(CGT)) {}
  void emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
                    CodeGen::CodeGenModule &M) const override;
};

} // End anonymous namespace.
// TODO: this implementation is likely now redundant with the default
// EmitVAArg.
Address XCoreABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty) const {
  CGBuilderTy &Builder = CGF.Builder;

  // Get the VAList.
  CharUnits SlotSize = CharUnits::fromQuantity(4);
  Address AP(Builder.CreateLoad(VAListAddr), SlotSize);
  // Handle the argument.
  ABIArgInfo AI = classifyArgumentType(Ty);
  CharUnits TypeAlign = getContext().getTypeAlignInChars(Ty);
  llvm::Type *ArgTy = CGT.ConvertType(Ty);
  if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
    AI.setCoerceToType(ArgTy);
  llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
  Address Val = Address::invalid();
  CharUnits ArgSize = CharUnits::Zero();
  switch (AI.getKind()) {
  case ABIArgInfo::Expand:
  case ABIArgInfo::CoerceAndExpand:
  case ABIArgInfo::InAlloca:
    llvm_unreachable("Unsupported ABI kind for va_arg");
  case ABIArgInfo::Ignore:
    Val = Address(llvm::UndefValue::get(ArgPtrTy), TypeAlign);
    ArgSize = CharUnits::Zero();
    break;
  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    Val = Builder.CreateBitCast(AP, ArgPtrTy);
    ArgSize = CharUnits::fromQuantity(
                  getDataLayout().getTypeAllocSize(AI.getCoerceToType()));
    ArgSize = ArgSize.alignTo(SlotSize);
    break;
  case ABIArgInfo::Indirect:
    Val = Builder.CreateElementBitCast(AP, ArgPtrTy);
    Val = Address(Builder.CreateLoad(Val), TypeAlign);
    ArgSize = SlotSize;
    break;
  }
  // Increment the VAList.
  if (!ArgSize.isZero()) {
    llvm::Value *APN =
        Builder.CreateConstInBoundsByteGEP(AP.getPointer(), ArgSize);
    Builder.CreateStore(APN, VAListAddr);
  }

  return Val;
}
/// During the expansion of a RecordType, an incomplete TypeString is placed
/// into the cache as a means to identify and break recursion.
/// If there is a Recursive encoding in the cache, it is swapped out and will
/// be reinserted by removeIncomplete().
/// All other types of encoding should have been used rather than arriving here.
void TypeStringCache::addIncomplete(const IdentifierInfo *ID,
                                    std::string StubEnc) {
  if (!ID)
    return;
  Entry &E = Map[ID];
  assert( (E.Str.empty() || E.State == Recursive) &&
          "Incorrect use of addIncomplete");
  assert(!StubEnc.empty() && "Passing an empty string to addIncomplete()");
  E.Swapped.swap(E.Str); // swap out the Recursive
  E.Str.swap(StubEnc);
  E.State = Incomplete;
  ++IncompleteCount;
}
/// Once the RecordType has been expanded, the temporary incomplete TypeString
/// must be removed from the cache.
/// If a Recursive was swapped out by addIncomplete(), it will be replaced.
/// Returns true if the RecordType was defined recursively.
bool TypeStringCache::removeIncomplete(const IdentifierInfo *ID) {
  if (!ID)
    return false;
  auto I = Map.find(ID);
  assert(I != Map.end() && "Entry not present");
  Entry &E = I->second;
  assert( (E.State == Incomplete ||
           E.State == IncompleteUsed) &&
          "Entry must be an incomplete type");
  bool IsRecursive = false;
  if (E.State == IncompleteUsed) {
    // We made use of our Incomplete encoding, thus we are recursive.
    IsRecursive = true;
    --IncompleteUsedCount;
  }
  if (E.Swapped.empty())
    Map.erase(I);
  else {
    // Swap the Recursive back.
    E.Swapped.swap(E.Str);
    E.Swapped.clear();
    E.State = Recursive;
  }
  --IncompleteCount;
  return IsRecursive;
}
/// Add the encoded TypeString to the cache only if it is NonRecursive or
/// Recursive (viz: all sub-members were expanded as fully as possible).
void TypeStringCache::addIfComplete(const IdentifierInfo *ID, StringRef Str,
                                    bool IsRecursive) {
  if (!ID || IncompleteUsedCount)
    return; // No key or it is an incomplete sub-type so don't add.
  Entry &E = Map[ID];
  if (IsRecursive && !E.Str.empty()) {
    assert(E.State==Recursive && E.Str.size() == Str.size() &&
           "This is not the same Recursive entry");
    // The parent container was not recursive after all, so we could have used
    // this Recursive sub-member entry after all, but we assumed the worst when
    // we started viz: IncompleteCount!=0.
    return;
  }
  assert(E.Str.empty() && "Entry already present");
  E.Str = Str.str();
  E.State = IsRecursive? Recursive : NonRecursive;
}
/// Return a cached TypeString encoding for the ID. If there isn't one, or we
/// are recursively expanding a type (IncompleteCount != 0) and the cached
/// encoding is Recursive, return an empty StringRef.
StringRef TypeStringCache::lookupStr(const IdentifierInfo *ID) {
  if (!ID)
    return StringRef();   // We have no key.
  auto I = Map.find(ID);
  if (I == Map.end())
    return StringRef();   // We have no encoding.
  Entry &E = I->second;
  if (E.State == Recursive && IncompleteCount)
    return StringRef();   // We don't use Recursive encodings for member types.

  if (E.State == Incomplete) {
    // The incomplete type is being used to break out of recursion.
    E.State = IncompleteUsed;
    ++IncompleteUsedCount;
  }
  return E.Str;
}
/// The XCore ABI includes a type information section that communicates symbol
/// type information to the linker. The linker uses this information to verify
/// safety/correctness of things such as array bounds and pointers et al.
/// The ABI only requires C (and XC) language modules to emit TypeStrings.
/// This type information (TypeString) is emitted into meta data for all global
/// symbols: definitions, declarations, functions & variables.
///
/// The TypeString carries type, qualifier, name, size & value details.
/// Please see 'Tools Development Guide' section 2.16.2 for format details:
/// https://www.xmos.com/download/public/Tools-Development-Guide%28X9114A%29.pdf
/// The output is tested by test/CodeGen/xcore-stringtype.c.
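///
/// Sketch of one encoding (an assumption, following the format document
/// above): a C function 'int f(int)' would carry the TypeString "f{si}(si)"
/// in its "xcore.typestrings" metadata entry.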
static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
                          CodeGen::CodeGenModule &CGM, TypeStringCache &TSC);
/// XCore uses emitTargetMD to emit TypeString metadata for global symbols.
void XCoreTargetCodeGenInfo::emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
                                          CodeGen::CodeGenModule &CGM) const {
  SmallStringEnc Enc;
  if (getTypeString(Enc, D, CGM, TSC)) {
    llvm::LLVMContext &Ctx = CGM.getModule().getContext();
    llvm::Metadata *MDVals[] = {llvm::ConstantAsMetadata::get(GV),
                                llvm::MDString::get(Ctx, Enc.str())};
    llvm::NamedMDNode *MD =
      CGM.getModule().getOrInsertNamedMetadata("xcore.typestrings");
    MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
  }
}
7969 // SPIR ABI Implementation
7970 //===----------------------------------------------------------------------===//
namespace {
class SPIRTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SPIRTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
  void emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
                    CodeGen::CodeGenModule &M) const override;
  unsigned getOpenCLKernelCallingConv() const override;
};

} // End anonymous namespace.
/// Emit SPIR specific metadata: OpenCL and SPIR version.
void SPIRTargetCodeGenInfo::emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
                                         CodeGen::CodeGenModule &CGM) const {
  llvm::LLVMContext &Ctx = CGM.getModule().getContext();
  llvm::Type *Int32Ty = llvm::Type::getInt32Ty(Ctx);
  llvm::Module &M = CGM.getModule();
  // SPIR v2.0 s2.12 - The SPIR version used by the module is stored in the
  // opencl.spir.version named metadata.
  llvm::Metadata *SPIRVerElts[] = {
      llvm::ConstantAsMetadata::get(
          llvm::ConstantInt::get(Int32Ty, CGM.getLangOpts().OpenCLVersion / 100)),
      llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
          Int32Ty, (CGM.getLangOpts().OpenCLVersion / 100 > 1) ? 0 : 2))};
  llvm::NamedMDNode *SPIRVerMD =
      M.getOrInsertNamedMetadata("opencl.spir.version");
  SPIRVerMD->addOperand(llvm::MDNode::get(Ctx, SPIRVerElts));
  appendOpenCLVersionMD(CGM);
}
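// Illustration (assumed option values): -cl-std=CL1.2 sets OpenCLVersion to
// 120, so opencl.spir.version becomes !{i32 1, i32 2}; -cl-std=CL2.0 yields
// !{i32 2, i32 0}.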
static void appendOpenCLVersionMD(CodeGen::CodeGenModule &CGM) {
  llvm::LLVMContext &Ctx = CGM.getModule().getContext();
  llvm::Type *Int32Ty = llvm::Type::getInt32Ty(Ctx);
  llvm::Module &M = CGM.getModule();
  // SPIR v2.0 s2.13 - The OpenCL version used by the module is stored in the
  // opencl.ocl.version named metadata node.
  llvm::Metadata *OCLVerElts[] = {
      llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
          Int32Ty, CGM.getLangOpts().OpenCLVersion / 100)),
      llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
          Int32Ty, (CGM.getLangOpts().OpenCLVersion % 100) / 10))};
  llvm::NamedMDNode *OCLVerMD =
      M.getOrInsertNamedMetadata("opencl.ocl.version");
  OCLVerMD->addOperand(llvm::MDNode::get(Ctx, OCLVerElts));
}
unsigned SPIRTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
  return llvm::CallingConv::SPIR_KERNEL;
}

static bool appendType(SmallStringEnc &Enc, QualType QType,
                       const CodeGen::CodeGenModule &CGM,
                       TypeStringCache &TSC);
/// Helper function for appendRecordType().
/// Builds a SmallVector containing the encoded field types in declaration
/// order.
static bool extractFieldType(SmallVectorImpl<FieldEncoding> &FE,
                             const RecordDecl *RD,
                             const CodeGen::CodeGenModule &CGM,
                             TypeStringCache &TSC) {
  for (const auto *Field : RD->fields()) {
    SmallStringEnc Enc;
    Enc += "m(";
    Enc += Field->getName();
    Enc += "){";
    if (Field->isBitField()) {
      Enc += "b(";
      llvm::raw_svector_ostream OS(Enc);
      OS << Field->getBitWidthValue(CGM.getContext());
      Enc += ')';
    }
    if (!appendType(Enc, Field->getType(), CGM, TSC))
      return false;
    if (Field->isBitField())
      Enc += ')';
    Enc += '}';
    FE.emplace_back(!Field->getName().empty(), Enc);
  }
  return true;
}
/// Appends structure and union types to Enc and adds encoding to cache.
/// Recursively calls appendType (via extractFieldType) for each field.
/// Union types have their fields ordered according to the ABI.
static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT,
                             const CodeGen::CodeGenModule &CGM,
                             TypeStringCache &TSC, const IdentifierInfo *ID) {
  // Append the cached TypeString if we have one.
  StringRef TypeString = TSC.lookupStr(ID);
  if (!TypeString.empty()) {
    Enc += TypeString;
    return true;
  }

  // Start to emit an incomplete TypeString.
  size_t Start = Enc.size();
  Enc += (RT->isUnionType()? 'u' : 's');
  Enc += '(';
  if (ID)
    Enc += ID->getName();
  Enc += "){";

  // We collect all encoded fields and order as necessary.
  bool IsRecursive = false;
  const RecordDecl *RD = RT->getDecl()->getDefinition();
  if (RD && !RD->field_empty()) {
    // An incomplete TypeString stub is placed in the cache for this RecordType
    // so that recursive calls to this RecordType will use it whilst building a
    // complete TypeString for this RecordType.
    SmallVector<FieldEncoding, 16> FE;
    std::string StubEnc(Enc.substr(Start).str());
    StubEnc += '}'; // StubEnc now holds a valid incomplete TypeString.
    TSC.addIncomplete(ID, std::move(StubEnc));
    if (!extractFieldType(FE, RD, CGM, TSC)) {
      (void) TSC.removeIncomplete(ID);
      return false;
    }
    IsRecursive = TSC.removeIncomplete(ID);
    // The ABI requires unions to be sorted but not structures.
    // See FieldEncoding::operator< for sort algorithm.
    if (RT->isUnionType())
      std::sort(FE.begin(), FE.end());
    // We can now complete the TypeString.
    unsigned E = FE.size();
    for (unsigned I = 0; I != E; ++I) {
      if (I)
        Enc += ',';
      Enc += FE[I].str();
    }
  }
  Enc += '}';
  TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive);
  return true;
}
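// Illustrative sketch (assumed encoding, per the format document cited
// earlier): 'struct Point { int X; int Y; };' encodes as
// "s(Point){m(X){si},m(Y){si}}"; only union fields would be sorted.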
/// Appends enum types to Enc and adds the encoding to the cache.
static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET,
                           TypeStringCache &TSC,
                           const IdentifierInfo *ID) {
  // Append the cached TypeString if we have one.
  StringRef TypeString = TSC.lookupStr(ID);
  if (!TypeString.empty()) {
    Enc += TypeString;
    return true;
  }

  size_t Start = Enc.size();
  Enc += "e(";
  if (ID)
    Enc += ID->getName();
  Enc += "){";

  // We collect all encoded enumerations and order them alphanumerically.
  if (const EnumDecl *ED = ET->getDecl()->getDefinition()) {
    SmallVector<FieldEncoding, 16> FE;
    for (auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E;
         ++I) {
      SmallStringEnc EnumEnc;
      EnumEnc += "m(";
      EnumEnc += I->getName();
      EnumEnc += "){";
      I->getInitVal().toString(EnumEnc);
      EnumEnc += '}';
      FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc));
    }
    std::sort(FE.begin(), FE.end());
    unsigned E = FE.size();
    for (unsigned I = 0; I != E; ++I) {
      if (I)
        Enc += ',';
      Enc += FE[I].str();
    }
  }
  Enc += '}';
  TSC.addIfComplete(ID, Enc.substr(Start), false);
  return true;
}
/// Appends type's qualifier to Enc.
/// This is done prior to appending the type's encoding.
static void appendQualifier(SmallStringEnc &Enc, QualType QT) {
  // Qualifiers are emitted in alphabetical order.
  static const char *const Table[]={"","c:","r:","cr:","v:","cv:","rv:","crv:"};
  int Lookup = 0;
  if (QT.isConstQualified())
    Lookup += 1<<0;
  if (QT.isRestrictQualified())
    Lookup += 1<<1;
  if (QT.isVolatileQualified())
    Lookup += 1<<2;
  Enc += Table[Lookup];
}
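// Illustrative examples: 'const int' contributes "c:" (Lookup == 1) and
// 'const volatile int' contributes "cv:" (Lookup == 5) ahead of the type's
// own encoding.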
/// Appends built-in types to Enc.
static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT) {
  const char *EncType;
  switch (BT->getKind()) {
    case BuiltinType::Void:       EncType = "0";   break;
    case BuiltinType::Bool:       EncType = "b";   break;
    case BuiltinType::Char_U:     EncType = "uc";  break;
    case BuiltinType::UChar:      EncType = "uc";  break;
    case BuiltinType::SChar:      EncType = "sc";  break;
    case BuiltinType::UShort:     EncType = "us";  break;
    case BuiltinType::Short:      EncType = "ss";  break;
    case BuiltinType::UInt:       EncType = "ui";  break;
    case BuiltinType::Int:        EncType = "si";  break;
    case BuiltinType::ULong:      EncType = "ul";  break;
    case BuiltinType::Long:       EncType = "sl";  break;
    case BuiltinType::ULongLong:  EncType = "ull"; break;
    case BuiltinType::LongLong:   EncType = "sll"; break;
    case BuiltinType::Float:      EncType = "ft";  break;
    case BuiltinType::Double:     EncType = "d";   break;
    case BuiltinType::LongDouble: EncType = "ld";  break;
    default:
      return false;
  }
  Enc += EncType;
  return true;
}
/// Appends a pointer encoding to Enc before calling appendType for the pointee.
static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT,
                              const CodeGen::CodeGenModule &CGM,
                              TypeStringCache &TSC) {
  Enc += "p(";
  if (!appendType(Enc, PT->getPointeeType(), CGM, TSC))
    return false;
  Enc += ')';
  return true;
}
/// Appends array encoding to Enc before calling appendType for the element.
static bool appendArrayType(SmallStringEnc &Enc, QualType QT,
                            const ArrayType *AT,
                            const CodeGen::CodeGenModule &CGM,
                            TypeStringCache &TSC, StringRef NoSizeEnc) {
  if (AT->getSizeModifier() != ArrayType::Normal)
    return false;
  Enc += "a(";
  if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT))
    CAT->getSize().toStringUnsigned(Enc);
  else
    Enc += NoSizeEnc; // Global arrays use "*", otherwise it is "".
  Enc += ':';
  // The Qualifiers should be attached to the type rather than the array.
  appendQualifier(Enc, QT);
  if (!appendType(Enc, AT->getElementType(), CGM, TSC))
    return false;
  Enc += ')';
  return true;
}
/// Appends a function encoding to Enc, calling appendType for the return type
/// and the arguments.
static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT,
                               const CodeGen::CodeGenModule &CGM,
                               TypeStringCache &TSC) {
  Enc += "f{";
  if (!appendType(Enc, FT->getReturnType(), CGM, TSC))
    return false;
  Enc += "}(";
  if (const FunctionProtoType *FPT = FT->getAs<FunctionProtoType>()) {
    // N.B. we are only interested in the adjusted param types.
    auto I = FPT->param_type_begin();
    auto E = FPT->param_type_end();
    if (I != E) {
      do {
        if (!appendType(Enc, *I, CGM, TSC))
          return false;
        ++I;
        if (I != E)
          Enc += ',';
      } while (I != E);
      if (FPT->isVariadic())
        Enc += ",va";
    } else {
      if (FPT->isVariadic())
        Enc += "va";
      else
        Enc += '0';
    }
  }
  Enc += ')';
  return true;
}
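// Illustrative sketches (assumed encodings): 'int f(float)' becomes
// "f{si}(ft)", the variadic 'int g(float, ...)' becomes "f{si}(ft,va)", and
// the parameterless 'int h(void)' becomes "f{si}(0)".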
/// Handles the type's qualifier before dispatching a call to handle specific
/// type(s).
static bool appendType(SmallStringEnc &Enc, QualType QType,
                       const CodeGen::CodeGenModule &CGM,
                       TypeStringCache &TSC) {

  QualType QT = QType.getCanonicalType();

  if (const ArrayType *AT = QT->getAsArrayTypeUnsafe())
    // The Qualifiers should be attached to the type rather than the array.
    // Thus we don't call appendQualifier() here.
    return appendArrayType(Enc, QT, AT, CGM, TSC, "");

  appendQualifier(Enc, QT);

  if (const BuiltinType *BT = QT->getAs<BuiltinType>())
    return appendBuiltinType(Enc, BT);

  if (const PointerType *PT = QT->getAs<PointerType>())
    return appendPointerType(Enc, PT, CGM, TSC);

  if (const EnumType *ET = QT->getAs<EnumType>())
    return appendEnumType(Enc, ET, TSC, QT.getBaseTypeIdentifier());

  if (const RecordType *RT = QT->getAsStructureType())
    return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());

  if (const RecordType *RT = QT->getAsUnionType())
    return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());

  if (const FunctionType *FT = QT->getAs<FunctionType>())
    return appendFunctionType(Enc, FT, CGM, TSC);

  return false;
}
static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
                          CodeGen::CodeGenModule &CGM, TypeStringCache &TSC) {
  if (!D)
    return false;

  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->getLanguageLinkage() != CLanguageLinkage)
      return false;
    return appendType(Enc, FD->getType(), CGM, TSC);
  }

  if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
    if (VD->getLanguageLinkage() != CLanguageLinkage)
      return false;
    QualType QT = VD->getType().getCanonicalType();
    if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) {
      // Global ArrayTypes are given a size of '*' if the size is unknown.
      // The Qualifiers should be attached to the type rather than the array.
      // Thus we don't call appendQualifier() here.
      return appendArrayType(Enc, QT, AT, CGM, TSC, "*");
    }
    return appendType(Enc, QT, CGM, TSC);
  }

  return false;
}
//===----------------------------------------------------------------------===//
// Driver code
//===----------------------------------------------------------------------===//

bool CodeGenModule::supportsCOMDAT() const {
  return getTriple().supportsCOMDAT();
}
const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
  if (TheTargetCodeGenInfo)
    return *TheTargetCodeGenInfo;

  // Helper to set the unique_ptr while still keeping the return value.
  auto SetCGInfo = [&](TargetCodeGenInfo *P) -> const TargetCodeGenInfo & {
    this->TheTargetCodeGenInfo.reset(P);
    return *P;
  };

  const llvm::Triple &Triple = getTarget().getTriple();
  switch (Triple.getArch()) {
  default:
    return SetCGInfo(new DefaultTargetCodeGenInfo(Types));
  case llvm::Triple::le32:
    return SetCGInfo(new PNaClTargetCodeGenInfo(Types));
  case llvm::Triple::mips:
  case llvm::Triple::mipsel:
    if (Triple.getOS() == llvm::Triple::NaCl)
      return SetCGInfo(new PNaClTargetCodeGenInfo(Types));
    return SetCGInfo(new MIPSTargetCodeGenInfo(Types, true));

  case llvm::Triple::mips64:
  case llvm::Triple::mips64el:
    return SetCGInfo(new MIPSTargetCodeGenInfo(Types, false));
  case llvm::Triple::aarch64:
  case llvm::Triple::aarch64_be: {
    AArch64ABIInfo::ABIKind Kind = AArch64ABIInfo::AAPCS;
    if (getTarget().getABI() == "darwinpcs")
      Kind = AArch64ABIInfo::DarwinPCS;

    return SetCGInfo(new AArch64TargetCodeGenInfo(Types, Kind));
  }

  case llvm::Triple::wasm32:
  case llvm::Triple::wasm64:
    return SetCGInfo(new WebAssemblyTargetCodeGenInfo(Types));
  case llvm::Triple::arm:
  case llvm::Triple::armeb:
  case llvm::Triple::thumb:
  case llvm::Triple::thumbeb: {
    if (Triple.getOS() == llvm::Triple::Win32) {
      return SetCGInfo(
          new WindowsARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS_VFP));
    }

    ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
    StringRef ABIStr = getTarget().getABI();
    if (ABIStr == "apcs-gnu")
      Kind = ARMABIInfo::APCS;
    else if (ABIStr == "aapcs16")
      Kind = ARMABIInfo::AAPCS16_VFP;
    else if (CodeGenOpts.FloatABI == "hard" ||
             (CodeGenOpts.FloatABI != "soft" &&
              (Triple.getEnvironment() == llvm::Triple::GNUEABIHF ||
               Triple.getEnvironment() == llvm::Triple::MuslEABIHF ||
               Triple.getEnvironment() == llvm::Triple::EABIHF)))
      Kind = ARMABIInfo::AAPCS_VFP;

    return SetCGInfo(new ARMTargetCodeGenInfo(Types, Kind));
  }
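  // Example (illustrative): 'armv7-unknown-linux-gnueabihf' carries no
  // explicit ABI string, but its GNUEABIHF environment makes the case above
  // select ARMABIInfo::AAPCS_VFP.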
  case llvm::Triple::ppc:
    return SetCGInfo(
        new PPC32TargetCodeGenInfo(Types, CodeGenOpts.FloatABI == "soft"));
  case llvm::Triple::ppc64:
    if (Triple.isOSBinFormatELF()) {
      PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv1;
      if (getTarget().getABI() == "elfv2")
        Kind = PPC64_SVR4_ABIInfo::ELFv2;
      bool HasQPX = getTarget().getABI() == "elfv1-qpx";
      bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";

      return SetCGInfo(new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX,
                                                        IsSoftFloat));
    }
    return SetCGInfo(new PPC64TargetCodeGenInfo(Types));
  case llvm::Triple::ppc64le: {
    assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!");
    PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv2;
    if (getTarget().getABI() == "elfv1" || getTarget().getABI() == "elfv1-qpx")
      Kind = PPC64_SVR4_ABIInfo::ELFv1;
    bool HasQPX = getTarget().getABI() == "elfv1-qpx";
    bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";

    return SetCGInfo(new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX,
                                                      IsSoftFloat));
  }
  case llvm::Triple::nvptx:
  case llvm::Triple::nvptx64:
    return SetCGInfo(new NVPTXTargetCodeGenInfo(Types));

  case llvm::Triple::msp430:
    return SetCGInfo(new MSP430TargetCodeGenInfo(Types));

  case llvm::Triple::systemz: {
    bool HasVector = getTarget().getABI() == "vector";
    return SetCGInfo(new SystemZTargetCodeGenInfo(Types, HasVector));
  }

  case llvm::Triple::tce:
  case llvm::Triple::tcele:
    return SetCGInfo(new TCETargetCodeGenInfo(Types));
  case llvm::Triple::x86: {
    bool IsDarwinVectorABI = Triple.isOSDarwin();
    bool RetSmallStructInRegABI =
        X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
    bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing();

    if (Triple.getOS() == llvm::Triple::Win32) {
      return SetCGInfo(new WinX86_32TargetCodeGenInfo(
          Types, IsDarwinVectorABI, RetSmallStructInRegABI,
          IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters));
    } else {
      return SetCGInfo(new X86_32TargetCodeGenInfo(
          Types, IsDarwinVectorABI, RetSmallStructInRegABI,
          IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters,
          CodeGenOpts.FloatABI == "soft"));
    }
  }
  case llvm::Triple::x86_64: {
    StringRef ABI = getTarget().getABI();
    X86AVXABILevel AVXLevel =
        (ABI == "avx512"
             ? X86AVXABILevel::AVX512
             : ABI == "avx" ? X86AVXABILevel::AVX : X86AVXABILevel::None);

    switch (Triple.getOS()) {
    case llvm::Triple::Win32:
      return SetCGInfo(new WinX86_64TargetCodeGenInfo(Types, AVXLevel));
    case llvm::Triple::PS4:
      return SetCGInfo(new PS4TargetCodeGenInfo(Types, AVXLevel));
    default:
      return SetCGInfo(new X86_64TargetCodeGenInfo(Types, AVXLevel));
    }
  }
  case llvm::Triple::hexagon:
    return SetCGInfo(new HexagonTargetCodeGenInfo(Types));
  case llvm::Triple::lanai:
    return SetCGInfo(new LanaiTargetCodeGenInfo(Types));
  case llvm::Triple::r600:
    return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types));
  case llvm::Triple::amdgcn:
    return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types));
  case llvm::Triple::sparc:
    return SetCGInfo(new SparcV8TargetCodeGenInfo(Types));
  case llvm::Triple::sparcv9:
    return SetCGInfo(new SparcV9TargetCodeGenInfo(Types));
  case llvm::Triple::xcore:
    return SetCGInfo(new XCoreTargetCodeGenInfo(Types));
  case llvm::Triple::spir:
  case llvm::Triple::spir64:
    return SetCGInfo(new SPIRTargetCodeGenInfo(Types));
  }
}