//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//

#include "TargetInfo.h"
#include "ABIInfo.h"
#include "CGCXXABI.h"
#include "CGValue.h"
#include "CodeGenFunction.h"
#include "clang/AST/RecordLayout.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>    // std::sort

using namespace clang;
using namespace CodeGen;

static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
                               llvm::Value *Array,
                               llvm::Value *Value,
                               unsigned FirstIndex,
                               unsigned LastIndex) {
  // Alternatively, we could emit this as a loop in the source.
  for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
    llvm::Value *Cell =
        Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I);
    Builder.CreateAlignedStore(Value, Cell, CharUnits::One());
  }
}

static bool isAggregateTypeForABI(QualType T) {
  return !CodeGenFunction::hasScalarEvaluationKind(T) ||
         T->isMemberFunctionPointerType();
}

ABIArgInfo
ABIInfo::getNaturalAlignIndirect(QualType Ty, bool ByRef, bool Realign,
                                 llvm::Type *Padding) const {
  return ABIArgInfo::getIndirect(getContext().getTypeAlignInChars(Ty),
                                 ByRef, Realign, Padding);
}

ABIArgInfo
ABIInfo::getNaturalAlignIndirectInReg(QualType Ty, bool Realign) const {
  return ABIArgInfo::getIndirectInReg(getContext().getTypeAlignInChars(Ty),
                                      /*ByRef*/ false, Realign);
}

Address ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                             QualType Ty) const {
  return Address::invalid();
}

ABIInfo::~ABIInfo() {}

/// Does the given lowering require more than the given number of
/// registers when expanded?
///
/// This is intended to be the basis of a reasonable basic implementation
/// of should{Pass,Return}IndirectlyForSwift.
///
/// For most targets, a limit of four total registers is reasonable; this
/// limits the amount of code required in order to move around the value
/// in case it wasn't produced immediately prior to the call by the caller
/// (or wasn't produced in exactly the right registers) or isn't used
/// immediately within the callee.  But some targets may need to further
/// limit the register count due to an inability to support that many
/// return registers.
static bool occupiesMoreThan(CodeGenTypes &cgt,
                             ArrayRef<llvm::Type*> scalarTypes,
                             unsigned maxAllRegisters) {
  unsigned intCount = 0, fpCount = 0;
  for (llvm::Type *type : scalarTypes) {
    if (type->isPointerTy()) {
      intCount++;
    } else if (auto intTy = dyn_cast<llvm::IntegerType>(type)) {
      auto ptrWidth = cgt.getTarget().getPointerWidth(0);
      intCount += (intTy->getBitWidth() + ptrWidth - 1) / ptrWidth;
    } else {
      assert(type->isVectorTy() || type->isFloatingPointTy());
      fpCount++;
    }
  }

  return (intCount + fpCount > maxAllRegisters);
}
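
// For example (illustrative, assuming a 32-bit pointer width): the scalar
// sequence { i8*, i64, float } counts 1 + 2 integer registers plus 1 fp
// register, so it occupies more than a limit of 3 total registers.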

bool SwiftABIInfo::isLegalVectorTypeForSwift(CharUnits vectorSize,
                                             llvm::Type *eltTy,
                                             unsigned numElts) const {
  // The default implementation of this assumes that the target guarantees
  // 128-bit SIMD support but nothing more.
  return (vectorSize.getQuantity() > 8 && vectorSize.getQuantity() <= 16);
}
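
// For example (illustrative): a 16-byte <4 x float> vector is legal under
// this default, while an 8-byte <2 x float> vector is not.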

static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT,
                                              CGCXXABI &CXXABI) {
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!RD)
    return CGCXXABI::RAA_Default;
  return CXXABI.getRecordArgABI(RD);
}

static CGCXXABI::RecordArgABI getRecordArgABI(QualType T,
                                              CGCXXABI &CXXABI) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return CGCXXABI::RAA_Default;
  return getRecordArgABI(RT, CXXABI);
}

/// Pass transparent unions as if they were the type of the first element. Sema
/// should ensure that all elements of the union have the same "machine type".
static QualType useFirstFieldIfTransparentUnion(QualType Ty) {
  if (const RecordType *UT = Ty->getAsUnionType()) {
    const RecordDecl *UD = UT->getDecl();
    if (UD->hasAttr<TransparentUnionAttr>()) {
      assert(!UD->field_empty() && "sema created an empty transparent union");
      return UD->field_begin()->getType();
    }
  }
  return Ty;
}
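
// For example (illustrative): given
//   typedef union {
//     int *ip;
//     const int *cip;
//   } __attribute__((transparent_union)) IntPtrUnion;
// an IntPtrUnion argument is classified exactly as if it were an 'int *'.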

CGCXXABI &ABIInfo::getCXXABI() const {
  return CGT.getCXXABI();
}

ASTContext &ABIInfo::getContext() const {
  return CGT.getContext();
}

llvm::LLVMContext &ABIInfo::getVMContext() const {
  return CGT.getLLVMContext();
}

const llvm::DataLayout &ABIInfo::getDataLayout() const {
  return CGT.getDataLayout();
}

const TargetInfo &ABIInfo::getTarget() const {
  return CGT.getTarget();
}

bool ABIInfo::isAndroid() const { return getTarget().getTriple().isAndroid(); }

bool ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  return false;
}

bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                uint64_t Members) const {
  return false;
}

bool ABIInfo::shouldSignExtUnsignedType(QualType Ty) const {
  return false;
}

LLVM_DUMP_METHOD void ABIArgInfo::dump() const {
  raw_ostream &OS = llvm::errs();
  OS << "(ABIArgInfo Kind=";
  switch (TheKind) {
  case Direct:
    OS << "Direct Type=";
    if (llvm::Type *Ty = getCoerceToType())
      Ty->print(OS);
    else
      OS << "null";
    break;
  case Extend:   OS << "Extend";   break;
  case Ignore:   OS << "Ignore";   break;
  case InAlloca:
    OS << "InAlloca Offset=" << getInAllocaFieldIndex();
    break;
  case Indirect:
    OS << "Indirect Align=" << getIndirectAlign().getQuantity()
       << " ByVal=" << getIndirectByVal()
       << " Realign=" << getIndirectRealign();
    break;
  case Expand:   OS << "Expand";   break;
  case CoerceAndExpand:
    OS << "CoerceAndExpand Type=";
    getCoerceAndExpandType()->print(OS);
    break;
  }
  OS << ")\n";
}

// Dynamically round a pointer up to a multiple of the given alignment.
static llvm::Value *emitRoundPointerUpToAlignment(CodeGenFunction &CGF,
                                                  llvm::Value *Ptr,
                                                  CharUnits Align) {
  llvm::Value *PtrAsInt = Ptr;
  // OverflowArgArea = (OverflowArgArea + Align - 1) & -Align;
  PtrAsInt = CGF.Builder.CreatePtrToInt(PtrAsInt, CGF.IntPtrTy);
  PtrAsInt = CGF.Builder.CreateAdd(PtrAsInt,
        llvm::ConstantInt::get(CGF.IntPtrTy, Align.getQuantity() - 1));
  PtrAsInt = CGF.Builder.CreateAnd(PtrAsInt,
        llvm::ConstantInt::get(CGF.IntPtrTy, -Align.getQuantity()));
  PtrAsInt = CGF.Builder.CreateIntToPtr(PtrAsInt,
                                        Ptr->getType(),
                                        Ptr->getName() + ".aligned");
  return PtrAsInt;
}
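
// For example, rounding the pointer value 0x1003 up to an 8-byte alignment
// computes (0x1003 + 7) & -8 == 0x1008.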

/// Emit va_arg for a platform using the common void* representation,
/// where arguments are simply emitted in an array of slots on the stack.
///
/// This version implements the core direct-value passing rules.
///
/// \param SlotSize - The size and alignment of a stack slot.
///   Each argument will be allocated to a multiple of this number of
///   slots, and all the slots will be aligned to this value.
/// \param AllowHigherAlign - The slot alignment is not a cap;
///   an argument type with an alignment greater than the slot size
///   will be emitted on a higher-alignment address, potentially
///   leaving one or more empty slots behind as padding.  If this
///   is false, the returned address might be less-aligned than
///   DirectAlign.
static Address emitVoidPtrDirectVAArg(CodeGenFunction &CGF,
                                      Address VAListAddr,
                                      llvm::Type *DirectTy,
                                      CharUnits DirectSize,
                                      CharUnits DirectAlign,
                                      CharUnits SlotSize,
                                      bool AllowHigherAlign) {
  // Cast the element type to i8* if necessary.  Some platforms define
  // va_list as a struct containing an i8* instead of just an i8*.
  if (VAListAddr.getElementType() != CGF.Int8PtrTy)
    VAListAddr = CGF.Builder.CreateElementBitCast(VAListAddr, CGF.Int8PtrTy);

  llvm::Value *Ptr = CGF.Builder.CreateLoad(VAListAddr, "argp.cur");

  // If the CC aligns values higher than the slot size, do so if needed.
  Address Addr = Address::invalid();
  if (AllowHigherAlign && DirectAlign > SlotSize) {
    Addr = Address(emitRoundPointerUpToAlignment(CGF, Ptr, DirectAlign),
                   DirectAlign);
  } else {
    Addr = Address(Ptr, SlotSize);
  }

  // Advance the pointer past the argument, then store that back.
  CharUnits FullDirectSize = DirectSize.alignTo(SlotSize);
  llvm::Value *NextPtr =
    CGF.Builder.CreateConstInBoundsByteGEP(Addr.getPointer(), FullDirectSize,
                                           "argp.next");
  CGF.Builder.CreateStore(NextPtr, VAListAddr);

  // If the argument is smaller than a slot, and this is a big-endian
  // target, the argument will be right-adjusted in its slot.
  if (DirectSize < SlotSize && CGF.CGM.getDataLayout().isBigEndian() &&
      !DirectTy->isStructTy()) {
    Addr = CGF.Builder.CreateConstInBoundsByteGEP(Addr, SlotSize - DirectSize);
  }

  Addr = CGF.Builder.CreateElementBitCast(Addr, DirectTy);
  return Addr;
}
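
// For example (illustrative): loading a 1-byte argument from a 4-byte slot
// on a big-endian target advances the address by SlotSize - DirectSize == 3
// bytes, so it points at the argument's actual storage within the slot.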

/// Emit va_arg for a platform using the common void* representation,
/// where arguments are simply emitted in an array of slots on the stack.
///
/// \param IsIndirect - Values of this type are passed indirectly.
/// \param ValueInfo - The size and alignment of this type, generally
///   computed with getContext().getTypeInfoInChars(ValueTy).
/// \param SlotSizeAndAlign - The size and alignment of a stack slot.
///   Each argument will be allocated to a multiple of this number of
///   slots, and all the slots will be aligned to this value.
/// \param AllowHigherAlign - The slot alignment is not a cap;
///   an argument type with an alignment greater than the slot size
///   will be emitted on a higher-alignment address, potentially
///   leaving one or more empty slots behind as padding.
static Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType ValueTy, bool IsIndirect,
                                std::pair<CharUnits, CharUnits> ValueInfo,
                                CharUnits SlotSizeAndAlign,
                                bool AllowHigherAlign) {
  // The size and alignment of the value that was passed directly.
  CharUnits DirectSize, DirectAlign;
  if (IsIndirect) {
    DirectSize = CGF.getPointerSize();
    DirectAlign = CGF.getPointerAlign();
  } else {
    DirectSize = ValueInfo.first;
    DirectAlign = ValueInfo.second;
  }

  // Cast the address we've calculated to the right type.
  llvm::Type *DirectTy = CGF.ConvertTypeForMem(ValueTy);
  if (IsIndirect)
    DirectTy = DirectTy->getPointerTo(0);

  Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, DirectTy,
                                        DirectSize, DirectAlign,
                                        SlotSizeAndAlign,
                                        AllowHigherAlign);

  if (IsIndirect) {
    Addr = Address(CGF.Builder.CreateLoad(Addr), ValueInfo.second);
  }

  return Addr;
}

static Address emitMergePHI(CodeGenFunction &CGF,
                            Address Addr1, llvm::BasicBlock *Block1,
                            Address Addr2, llvm::BasicBlock *Block2,
                            const llvm::Twine &Name = "") {
  assert(Addr1.getType() == Addr2.getType());
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(Addr1.getType(), 2, Name);
  PHI->addIncoming(Addr1.getPointer(), Block1);
  PHI->addIncoming(Addr2.getPointer(), Block2);
  CharUnits Align = std::min(Addr1.getAlignment(), Addr2.getAlignment());
  return Address(PHI, Align);
}

TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }

// If someone can figure out a general rule for this, that would be great.
// It's probably just doomed to be platform-dependent, though.
unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
  // Verified for:
  //   x86-64     FreeBSD, Linux, Darwin
  //   x86-32     FreeBSD, Linux, Darwin
  //   PowerPC    Linux, Darwin
  //   ARM        Darwin (*not* EABI)
  //   AArch64    Linux
  return 32;
}

bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args,
                                       const FunctionNoProtoType *fnType) const {
  // The following conventions are known to require this to be false:
  //   x86_stdcall
  //   HiPE
  // For everything else, we just prefer false unless we opt out.
  return false;
}

void
TargetCodeGenInfo::getDependentLibraryOption(llvm::StringRef Lib,
                                             llvm::SmallString<24> &Opt) const {
  // This assumes the user is passing a library name like "rt" instead of a
  // filename like "librt.a/so", and that they don't care whether it's static or
  // dynamic.
  Opt = "-l";
  Opt += Lib;
}

unsigned TargetCodeGenInfo::getOpenCLKernelCallingConv() const {
  return llvm::CallingConv::C;
}

static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);

/// isEmptyField - Return true iff the field is "empty", that is, it
/// is an unnamed bit-field or an (array of) empty record(s).
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
                         bool AllowArrays) {
  if (FD->isUnnamedBitfield())
    return true;

  QualType FT = FD->getType();

  // Constant arrays of empty records count as empty, strip them off.
  // Constant arrays of zero length always count as empty.
  if (AllowArrays)
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize() == 0)
        return true;
      FT = AT->getElementType();
    }

  const RecordType *RT = FT->getAs<RecordType>();
  if (!RT)
    return false;

  // C++ record fields are never empty, at least in the Itanium ABI.
  //
  // FIXME: We should use a predicate for whether this behavior is true in the
  // current ABI.
  if (isa<CXXRecordDecl>(RT->getDecl()))
    return false;

  return isEmptyRecord(Context, FT, AllowArrays);
}

/// isEmptyRecord - Return true iff a structure contains only empty
/// fields. Note that a structure with a flexible array member is not
/// considered empty.
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())
      if (!isEmptyRecord(Context, I.getType(), true))
        return false;

  for (const auto *I : RD->fields())
    if (!isEmptyField(Context, I, AllowArrays))
      return false;
  return true;
}

/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The field declaration for the single non-empty field, if
/// it exists.
static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return nullptr;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return nullptr;

  const Type *Found = nullptr;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (const auto &I : CXXRD->bases()) {
      // Ignore empty records.
      if (isEmptyRecord(Context, I.getType(), true))
        continue;

      // If we already found an element then this isn't a single-element struct.
      if (Found)
        return nullptr;

      // If this is non-empty and not a single element struct, the composite
      // cannot be a single element struct.
      Found = isSingleElementStruct(I.getType(), Context);
      if (!Found)
        return nullptr;
    }
  }

  // Check for single element.
  for (const auto *FD : RD->fields()) {
    QualType FT = FD->getType();

    // Ignore empty fields.
    if (isEmptyField(Context, FD, true))
      continue;

    // If we already found an element then this isn't a single-element
    // struct.
    if (Found)
      return nullptr;

    // Treat single element arrays as the element.
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize().getZExtValue() != 1)
        break;
      FT = AT->getElementType();
    }

    if (!isAggregateTypeForABI(FT)) {
      Found = FT.getTypePtr();
    } else {
      Found = isSingleElementStruct(FT, Context);
      if (!Found)
        return nullptr;
    }
  }

  // We don't consider a struct a single-element struct if it has
  // padding beyond the element type.
  if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
    return nullptr;

  return Found;
}
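
// For example (illustrative): 'struct S { struct { double d; } inner; }'
// yields the 'double' type, while 'struct T { float a; float b; }' has two
// non-empty fields and is not a single-element struct.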

static Address EmitVAArgInstr(CodeGenFunction &CGF, Address VAListAddr,
                              QualType Ty, const ABIArgInfo &AI) {
  // This default implementation defers to the llvm backend's va_arg
  // instruction. It can handle only passing arguments directly
  // (typically only handled in the backend for primitive types), or
  // aggregates passed indirectly by pointer (NOTE: if the "byval"
  // flag has ABI impact in the callee, this implementation cannot
  // work.)
  //
  // Only a few cases are covered here at the moment -- those needed
  // by the default abi.
  llvm::Value *Val;

  if (AI.isIndirect()) {
    assert(!AI.getPaddingType() &&
           "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
    assert(
        !AI.getIndirectRealign() &&
        "Unexpected IndirectRealign seen in arginfo in generic VAArg emitter!");

    auto TyInfo = CGF.getContext().getTypeInfoInChars(Ty);
    CharUnits TyAlignForABI = TyInfo.second;

    llvm::Type *BaseTy =
        llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
    llvm::Value *Addr =
        CGF.Builder.CreateVAArg(VAListAddr.getPointer(), BaseTy);
    return Address(Addr, TyAlignForABI);
  } else {
    assert((AI.isDirect() || AI.isExtend()) &&
           "Unexpected ArgInfo Kind in generic VAArg emitter!");

    assert(!AI.getInReg() &&
           "Unexpected InReg seen in arginfo in generic VAArg emitter!");
    assert(!AI.getPaddingType() &&
           "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
    assert(!AI.getDirectOffset() &&
           "Unexpected DirectOffset seen in arginfo in generic VAArg emitter!");
    assert(!AI.getCoerceToType() &&
           "Unexpected CoerceToType seen in arginfo in generic VAArg emitter!");

    Address Temp = CGF.CreateMemTemp(Ty, "varet");
    Val = CGF.Builder.CreateVAArg(VAListAddr.getPointer(), CGF.ConvertType(Ty));
    CGF.Builder.CreateStore(Val, Temp);
    return Temp;
  }
}

/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
class DefaultABIInfo : public ABIInfo {
public:
  DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type);
  }

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override {
    return EmitVAArgInstr(CGF, VAListAddr, Ty, classifyArgumentType(Ty));
  }
};

class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
};

ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // passed by value.
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

    return getNaturalAlignIndirect(Ty);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy))
    return getNaturalAlignIndirect(RetTy);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

//===----------------------------------------------------------------------===//
// WebAssembly ABI Implementation
//
// This is a very simple ABI that relies a lot on DefaultABIInfo.
//===----------------------------------------------------------------------===//

class WebAssemblyABIInfo final : public DefaultABIInfo {
public:
  explicit WebAssemblyABIInfo(CodeGen::CodeGenTypes &CGT)
      : DefaultABIInfo(CGT) {}

private:
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  // DefaultABIInfo's classifyReturnType and classifyArgumentType are
  // non-virtual, but computeInfo and EmitVAArg are virtual, so we
  // overload them.
  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &Arg : FI.arguments())
      Arg.info = classifyArgumentType(Arg.type);
  }

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
};

class WebAssemblyTargetCodeGenInfo final : public TargetCodeGenInfo {
public:
  explicit WebAssemblyTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(new WebAssemblyABIInfo(CGT)) {}
};

/// \brief Classify argument of given type \p Ty.
ABIArgInfo WebAssemblyABIInfo::classifyArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // passed by value.
    if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();
    // Lower single-element structs to just pass a regular value. TODO: We
    // could do reasonable-size multiple-element structs too, using getExpand(),
    // though watch out for things like bitfields.
    if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
      return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
  }

  // Otherwise just do the default thing.
  return DefaultABIInfo::classifyArgumentType(Ty);
}

ABIArgInfo WebAssemblyABIInfo::classifyReturnType(QualType RetTy) const {
  if (isAggregateTypeForABI(RetTy)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // returned by value.
    if (!getRecordArgABI(RetTy, getCXXABI())) {
      // Ignore empty structs/unions.
      if (isEmptyRecord(getContext(), RetTy, true))
        return ABIArgInfo::getIgnore();
      // Lower single-element structs to just return a regular value. TODO: We
      // could do reasonable-size multiple-element structs too, using
      // ABIArgInfo::getDirect().
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
    }
  }

  // Otherwise just do the default thing.
  return DefaultABIInfo::classifyReturnType(RetTy);
}

Address WebAssemblyABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                      QualType Ty) const {
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect=*/ false,
                          getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(4),
                          /*AllowHigherAlign=*/ true);
}

//===----------------------------------------------------------------------===//
// le32/PNaCl bitcode ABI Implementation
//
// This is a simplified version of the x86_32 ABI.  Arguments and return values
// are always passed on the stack.
//===----------------------------------------------------------------------===//

class PNaClABIInfo : public ABIInfo {
public:
  PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override;
  Address EmitVAArg(CodeGenFunction &CGF,
                    Address VAListAddr, QualType Ty) const override;
};

class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(new PNaClABIInfo(CGT)) {}
};

void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type);
}

Address PNaClABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty) const {
  // The PNaCL ABI is a bit odd, in that varargs don't use normal
  // function classification. Structs get passed directly for varargs
  // functions, through a rewriting transform in
  // pnacl-llvm/lib/Transforms/NaCl/ExpandVarArgs.cpp, which allows
  // this target to actually support va_arg instructions with an
  // aggregate type, unlike other targets.
  return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());
}

/// \brief Classify argument of given type \p Ty.
ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty)) {
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
    return getNaturalAlignIndirect(Ty);
  } else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
    // Treat an enum type as its underlying type.
    Ty = EnumTy->getDecl()->getIntegerType();
  } else if (Ty->isFloatingType()) {
    // Floating-point types don't go inreg.
    return ABIArgInfo::getDirect();
  }

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // In the PNaCl ABI we always return records/structures on the stack.
  if (isAggregateTypeForABI(RetTy))
    return getNaturalAlignIndirect(RetTy);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

/// IsX86_MMXType - Return true if this is an MMX type.
bool IsX86_MMXType(llvm::Type *IRType) {
  // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>.
  return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
    cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
    IRType->getScalarSizeInBits() != 64;
}

static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                          StringRef Constraint,
                                          llvm::Type* Ty) {
  if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy()) {
    if (cast<llvm::VectorType>(Ty)->getBitWidth() != 64) {
      // Invalid MMX constraint
      return nullptr;
    }

    return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
  }

  // No operation needed
  return Ty;
}

/// Returns true if this type can be passed in SSE registers with the
/// X86_VectorCall calling convention. Shared between x86_32 and x86_64.
static bool isX86VectorTypeForVectorCall(ASTContext &Context, QualType Ty) {
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half)
      return true;
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // vectorcall can pass XMM, YMM, and ZMM vectors. We don't pass SSE1 MMX
    // registers specially.
    unsigned VecSize = Context.getTypeSize(VT);
    if (VecSize == 128 || VecSize == 256 || VecSize == 512)
      return true;
  }
  return false;
}

/// Returns true if this aggregate is small enough to be passed in SSE registers
/// in the X86_VectorCall calling convention. Shared between x86_32 and x86_64.
static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
  return NumMembers <= 4;
}
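
// For example (illustrative): a homogeneous aggregate of two __m128 members
// is small enough, while one with five members is not.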

//===----------------------------------------------------------------------===//
// X86-32 ABI Implementation
//===----------------------------------------------------------------------===//

/// \brief Similar to llvm::CCState, but for Clang.
struct CCState {
  CCState(unsigned CC) : CC(CC), FreeRegs(0), FreeSSERegs(0) {}

  unsigned CC;
  unsigned FreeRegs;
  unsigned FreeSSERegs;
};

/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public SwiftABIInfo {
  enum Class {
    Integer,
    Float
  };

  static const unsigned MinABIStackAlignInBytes = 4;

  bool IsDarwinVectorABI;
  bool IsRetSmallStructInRegABI;
  bool IsWin32StructABI;
  bool IsSoftFloatABI;
  bool IsMCUABI;
  unsigned DefaultNumRegisterParameters;

  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }

  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorTypeForVectorCall(getContext(), Ty);
  }

  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t NumMembers) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorCallAggregateSmallEnough(NumMembers);
  }

  bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context) const;

  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;

  ABIArgInfo getIndirectReturnResult(QualType Ty, CCState &State) const;

  /// \brief Return the alignment to use for the given type on the stack.
  unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;

  Class classify(QualType Ty) const;
  ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const;
  ABIArgInfo classifyArgumentType(QualType Ty, CCState &State) const;

  /// \brief Updates the number of available free registers, returns
  /// true if any registers were allocated.
  bool updateFreeRegs(QualType Ty, CCState &State) const;

  bool shouldAggregateUseDirect(QualType Ty, CCState &State, bool &InReg,
                                bool &NeedsPadding) const;
  bool shouldPrimitiveUseInReg(QualType Ty, CCState &State) const;

  bool canExpandIndirectArgument(QualType Ty) const;

  /// \brief Rewrite the function info so that all memory arguments use
  /// inalloca.
  void rewriteWithInAlloca(CGFunctionInfo &FI) const;

  void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
                           CharUnits &StackOffset, ABIArgInfo &Info,
                           QualType Type) const;

public:
  void computeInfo(CGFunctionInfo &FI) const override;
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
                bool RetSmallStructInRegABI, bool Win32StructABI,
                unsigned NumRegisterParameters, bool SoftFloatABI)
    : SwiftABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
      IsRetSmallStructInRegABI(RetSmallStructInRegABI),
      IsWin32StructABI(Win32StructABI),
      IsSoftFloatABI(SoftFloatABI),
      IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()),
      DefaultNumRegisterParameters(NumRegisterParameters) {}

  bool shouldPassIndirectlyForSwift(CharUnits totalSize,
                                    ArrayRef<llvm::Type*> scalars,
                                    bool asReturnValue) const override {
    // LLVM's x86-32 lowering currently only assigns up to three
    // integer registers and three fp registers.  Oddly, it'll use up to
    // four vector registers for vectors, but those can overlap with the
    // scalar registers.
    return occupiesMoreThan(CGT, scalars, /*total*/ 3);
  }
};

class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
                          bool RetSmallStructInRegABI, bool Win32StructABI,
                          unsigned NumRegisterParameters, bool SoftFloatABI)
      : TargetCodeGenInfo(new X86_32ABIInfo(
            CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
            NumRegisterParameters, SoftFloatABI)) {}

  static bool isStructReturnInRegABI(
      const llvm::Triple &Triple, const CodeGenOptions &Opts);

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    // Darwin uses different dwarf register numbers for EH.
    if (CGM.getTarget().getTriple().isOSDarwin()) return 5;
    return 4;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  void addReturnRegisterOutputs(CodeGenFunction &CGF, LValue ReturnValue,
                                std::string &Constraints,
                                std::vector<llvm::Type *> &ResultRegTypes,
                                std::vector<llvm::Type *> &ResultTruncRegTypes,
                                std::vector<LValue> &ResultRegDests,
                                std::string &AsmString,
                                unsigned NumOutputs) const override;

  llvm::Constant *
  getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
    unsigned Sig = (0xeb << 0) |  // jmp rel8
                   (0x06 << 8) |  //           .+0x08
                   ('F' << 16) |
                   ('T' << 24);
    return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
  }

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "movl\t%ebp, %ebp"
           "\t\t## marker for objc_retainAutoreleaseReturnValue";
  }
};

/// Rewrite input constraint references after adding some output constraints.
/// In the case where there is one output and one input and we add one output,
/// we need to replace all operand references greater than or equal to 1:
///     mov $0, $1
///     mov eax, $1
/// The result will be:
///     mov $0, $2
///     mov eax, $2
static void rewriteInputConstraintReferences(unsigned FirstIn,
                                             unsigned NumNewOuts,
                                             std::string &AsmString) {
  std::string Buf;
  llvm::raw_string_ostream OS(Buf);
  size_t Pos = 0;
  while (Pos < AsmString.size()) {
    size_t DollarStart = AsmString.find('$', Pos);
    if (DollarStart == std::string::npos)
      DollarStart = AsmString.size();
    size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart);
    if (DollarEnd == std::string::npos)
      DollarEnd = AsmString.size();
    OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
    Pos = DollarEnd;
    size_t NumDollars = DollarEnd - DollarStart;
    if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
      // We have an operand reference.
      size_t DigitStart = Pos;
      size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart);
      if (DigitEnd == std::string::npos)
        DigitEnd = AsmString.size();
      StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
      unsigned OperandIndex;
      if (!OperandStr.getAsInteger(10, OperandIndex)) {
        if (OperandIndex >= FirstIn)
          OperandIndex += NumNewOuts;
        OS << OperandIndex;
      } else {
        OS << OperandStr;
      }
      Pos = DigitEnd;
    }
  }
  AsmString = std::move(OS.str());
}

/// Add output constraints for EAX:EDX because they are return registers.
void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
    CodeGenFunction &CGF, LValue ReturnSlot, std::string &Constraints,
    std::vector<llvm::Type *> &ResultRegTypes,
    std::vector<llvm::Type *> &ResultTruncRegTypes,
    std::vector<LValue> &ResultRegDests, std::string &AsmString,
    unsigned NumOutputs) const {
  uint64_t RetWidth = CGF.getContext().getTypeSize(ReturnSlot.getType());

  // Use the EAX constraint if the width is 32 or smaller and EAX:EDX if it is
  // 64 or smaller.
  if (!Constraints.empty())
    Constraints += ',';
  if (RetWidth <= 32) {
    Constraints += "={eax}";
    ResultRegTypes.push_back(CGF.Int32Ty);
  } else {
    // Use the 'A' constraint for EAX:EDX.
    Constraints += "=A";
    ResultRegTypes.push_back(CGF.Int64Ty);
  }

  // Truncate EAX or EAX:EDX to an integer of the appropriate size.
  llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth);
  ResultTruncRegTypes.push_back(CoerceTy);

  // Coerce the integer by bitcasting the return slot pointer.
  ReturnSlot.setAddress(CGF.Builder.CreateBitCast(ReturnSlot.getAddress(),
                                                  CoerceTy->getPointerTo()));
  ResultRegDests.push_back(ReturnSlot);

  rewriteInputConstraintReferences(NumOutputs, 1, AsmString);
}

/// shouldReturnTypeInRegister - Determine if the given type should be
/// returned in a register (for the Darwin and MCU ABI).
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
                                               ASTContext &Context) const {
  uint64_t Size = Context.getTypeSize(Ty);

  // For i386, type must be register sized.
  // For the MCU ABI, it only needs to be <= 8-byte
  if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))
    return false;

  if (Ty->isVectorType()) {
    // 64- and 128- bit vectors inside structures are not returned in
    // registers.
    if (Size == 64 || Size == 128)
      return false;

    return true;
  }

  // If this is a builtin, pointer, enum, complex type, member pointer, or
  // member function pointer it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
      Ty->isAnyComplexType() || Ty->isEnumeralType() ||
      Ty->isBlockPointerType() || Ty->isMemberPointerType())
    return true;

  // Arrays are treated like records.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
    return shouldReturnTypeInRegister(AT->getElementType(), Context);

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // FIXME: Traverse bases here too.

  // Structure types are passed in register if all fields would be
  // passed in a register.
  for (const auto *FD : RT->getDecl()->fields()) {
    // Empty fields are ignored.
    if (isEmptyField(Context, FD, true))
      continue;

    // Check fields recursively.
    if (!shouldReturnTypeInRegister(FD->getType(), Context))
      return false;
  }
  return true;
}

static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
  // Treat complex types as the element type.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
    Ty = CTy->getElementType();

  // Check for a type which we know has a simple scalar argument-passing
  // convention without any padding.  (We're specifically looking for 32
  // and 64-bit integer and integer-equivalents, float, and double.)
  if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
      !Ty->isEnumeralType() && !Ty->isBlockPointerType())
    return false;

  uint64_t Size = Context.getTypeSize(Ty);
  return Size == 32 || Size == 64;
}
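
// For example, '_Complex double' is checked as its element type 'double'
// (64 bits) and qualifies, while 'short' (16 bits) does not.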

/// Test whether an argument type which is to be passed indirectly (on the
/// stack) would have the equivalent layout if it was expanded into separate
/// arguments. If so, we prefer to do the latter to avoid inhibiting
/// optimizations.
bool X86_32ABIInfo::canExpandIndirectArgument(QualType Ty) const {
  // We can only expand structure types.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    if (!IsWin32StructABI) {
      // On non-Windows, we have to conservatively match our old bitcode
      // prototypes in order to be ABI-compatible at the bitcode level.
      if (!CXXRD->isCLike())
        return false;
    } else {
      // Don't do this for dynamic classes.
      if (CXXRD->isDynamicClass())
        return false;
      // Don't do this if there are any non-empty bases.
      for (const CXXBaseSpecifier &Base : CXXRD->bases()) {
        if (!isEmptyRecord(getContext(), Base.getType(), /*AllowArrays=*/true))
          return false;
      }
    }
  }

  uint64_t Size = 0;

  for (const auto *FD : RD->fields()) {
    // Scalar arguments on the stack get 4 byte alignment on x86. If the
    // argument is smaller than 32-bits, expanding the struct will create
    // alignment padding.
    if (!is32Or64BitBasicType(FD->getType(), getContext()))
      return false;

    // FIXME: Reject bit-fields wholesale; there are two problems, we don't know
    // how to expand them yet, and the predicate for telling if a bitfield still
    // counts as "basic" is more complicated than what we were doing previously.
    if (FD->isBitField())
      return false;

    Size += getContext().getTypeSize(FD->getType());
  }

  // We can do this if there was no alignment padding.
  return Size == getContext().getTypeSize(Ty);
}
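
// For example (illustrative): 'struct { int a; float b; }' fills exactly two
// 32-bit stack slots with no padding and can be expanded, while
// 'struct { char c; }' cannot, since 'char' is not a 32- or 64-bit basic type.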

ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(QualType RetTy,
                                                  CCState &State) const {
  // If the return value is indirect, then the hidden argument is consuming one
  // integer register.
  if (State.FreeRegs) {
    --State.FreeRegs;
    if (!IsMCUABI)
      return getNaturalAlignIndirectInReg(RetTy);
  }
  return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
}

ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
                                             CCState &State) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  if (State.CC == llvm::CallingConv::X86_VectorCall &&
      isHomogeneousAggregate(RetTy, Base, NumElts)) {
    // The LLVM struct type for such an aggregate should lower properly.
    return ABIArgInfo::getDirect();
  }

  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // On Darwin, some vectors are returned in registers.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // 128-bit vectors are a special case; they are returned in
      // registers and we need to make sure to pick a type the LLVM
      // backend will like.
      if (Size == 128)
        return ABIArgInfo::getDirect(llvm::VectorType::get(
                  llvm::Type::getInt64Ty(getVMContext()), 2));

      // Always return in register if it fits in a general purpose
      // register, or if it is 64 bits and has a single element.
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));

      return getIndirectReturnResult(RetTy, State);
    }

    return ABIArgInfo::getDirect();
  }

  if (isAggregateTypeForABI(RetTy)) {
    if (const RecordType *RT = RetTy->getAs<RecordType>()) {
      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectReturnResult(RetTy, State);
    }

    // If specified, structs and unions are always indirect.
    if (!IsRetSmallStructInRegABI && !RetTy->isAnyComplexType())
      return getIndirectReturnResult(RetTy, State);

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), RetTy, true))
      return ABIArgInfo::getIgnore();

    // Small structures which are register sized are generally returned
    // in a register.
    if (shouldReturnTypeInRegister(RetTy, getContext())) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // As a special-case, if the struct is a "single-element" struct, and
      // the field is of type "float" or "double", return it in a
      // floating-point register. (MSVC does not apply this special case.)
      // We apply a similar transformation for pointer types to improve the
      // quality of the generated IR.
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
            || SeltTy->hasPointerRepresentation())
          return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

      // FIXME: We should be able to narrow this integer in cases with dead
      // padding.
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size));
    }

    return getIndirectReturnResult(RetTy, State);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

static bool isSSEVectorType(ASTContext &Context, QualType Ty) {
  return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
}

static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())
      if (!isRecordWithSSEVectorType(Context, I.getType()))
        return false;

  for (const auto *i : RD->fields()) {
    QualType FT = i->getType();

    if (isSSEVectorType(Context, FT))
      return true;

    if (isRecordWithSSEVectorType(Context, FT))
      return true;
  }

  return false;
}

unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
                                                 unsigned Align) const {
  // Otherwise, if the alignment is less than or equal to the minimum ABI
  // alignment, just use the default; the backend will handle this.
  if (Align <= MinABIStackAlignInBytes)
    return 0; // Use default alignment.

  // On non-Darwin, the stack type alignment is always 4.
  if (!IsDarwinVectorABI) {
    // Set explicit alignment, since we may need to realign the top.
    return MinABIStackAlignInBytes;
  }

  // Otherwise, if the type contains an SSE vector type, the alignment is 16.
  if (Align >= 16 && (isSSEVectorType(getContext(), Ty) ||
                      isRecordWithSSEVectorType(getContext(), Ty)))
    return 16;

  return MinABIStackAlignInBytes;
}

ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
                                            CCState &State) const {
  if (!ByVal) {
    if (State.FreeRegs) {
      --State.FreeRegs; // Non-byval indirects just use one pointer.
      if (!IsMCUABI)
        return getNaturalAlignIndirectInReg(Ty);
    }
    return getNaturalAlignIndirect(Ty, false);
  }

  // Compute the byval alignment.
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
  if (StackAlign == 0)
    return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true);

  // If the stack alignment is less than the type alignment, realign the
  // argument.
  bool Realign = TypeAlign > StackAlign;
  return ABIArgInfo::getIndirect(CharUnits::fromQuantity(StackAlign),
                                 /*ByVal=*/true, Realign);
}

X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
  const Type *T = isSingleElementStruct(Ty, getContext());
  if (!T)
    T = Ty.getTypePtr();

  if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
    BuiltinType::Kind K = BT->getKind();
    if (K == BuiltinType::Float || K == BuiltinType::Double)
      return Float;
  }
  return Integer;
}

bool X86_32ABIInfo::updateFreeRegs(QualType Ty, CCState &State) const {
  if (!IsSoftFloatABI) {
    Class C = classify(Ty);
    if (C == Float)
      return false;
  }

  unsigned Size = getContext().getTypeSize(Ty);
  unsigned SizeInRegs = (Size + 31) / 32;

  if (SizeInRegs == 0)
    return false;

  if (!IsMCUABI) {
    if (SizeInRegs > State.FreeRegs) {
      State.FreeRegs = 0;
      return false;
    }
  } else {
    // The MCU psABI allows passing parameters in-reg even if there are
    // earlier parameters that are passed on the stack. Also,
    // it does not allow passing >8-byte structs in-register,
    // even if there are 3 free registers available.
    if (SizeInRegs > State.FreeRegs || SizeInRegs > 2)
      return false;
  }

  State.FreeRegs -= SizeInRegs;
  return true;
}
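
// For example: with 3 free registers, an 8-byte struct needs
// SizeInRegs = (64 + 31) / 32 = 2 registers, leaving one register free.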

bool X86_32ABIInfo::shouldAggregateUseDirect(QualType Ty, CCState &State,
                                             bool &InReg,
                                             bool &NeedsPadding) const {
  // On Windows, aggregates other than HFAs are never passed in registers, and
  // they do not consume register slots. Homogenous floating-point aggregates
  // (HFAs) have already been dealt with at this point.
  if (IsWin32StructABI && isAggregateTypeForABI(Ty))
    return false;

  NeedsPadding = false;
  InReg = !IsMCUABI;

  if (!updateFreeRegs(Ty, State))
    return false;

  if (IsMCUABI)
    return true;

  if (State.CC == llvm::CallingConv::X86_FastCall ||
      State.CC == llvm::CallingConv::X86_VectorCall) {
    if (getContext().getTypeSize(Ty) <= 32 && State.FreeRegs)
      NeedsPadding = true;

    return false;
  }

  return true;
}

bool X86_32ABIInfo::shouldPrimitiveUseInReg(QualType Ty, CCState &State) const {
  if (!updateFreeRegs(Ty, State))
    return false;

  if (IsMCUABI)
    return false;

  if (State.CC == llvm::CallingConv::X86_FastCall ||
      State.CC == llvm::CallingConv::X86_VectorCall) {
    if (getContext().getTypeSize(Ty) > 32)
      return false;

    return (Ty->isIntegralOrEnumerationType() || Ty->isPointerType() ||
            Ty->isReferenceType());
  }

  return true;
}

ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
                                               CCState &State) const {
  // FIXME: Set alignment on indirect arguments.

  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Check with the C++ ABI first.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (RT) {
    CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
    if (RAA == CGCXXABI::RAA_Indirect) {
      return getIndirectResult(Ty, false, State);
    } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
      // The field index doesn't matter, we'll fix it up later.
      return ABIArgInfo::getInAlloca(/*FieldIndex=*/0);
    }
  }

  // vectorcall adds the concept of a homogenous vector aggregate, similar
  // to other targets.
  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  if (State.CC == llvm::CallingConv::X86_VectorCall &&
      isHomogeneousAggregate(Ty, Base, NumElts)) {
    if (State.FreeSSERegs >= NumElts) {
      State.FreeSSERegs -= NumElts;
      if (Ty->isBuiltinType() || Ty->isVectorType())
        return ABIArgInfo::getDirect();
      return ABIArgInfo::getExpand();
    }
    return getIndirectResult(Ty, /*ByVal=*/false, State);
  }

  if (isAggregateTypeForABI(Ty)) {
    // Structures with flexible arrays are always indirect.
    // FIXME: This should not be byval!
    if (RT && RT->getDecl()->hasFlexibleArrayMember())
      return getIndirectResult(Ty, true, State);

    // Ignore empty structs/unions on non-Windows.
    if (!IsWin32StructABI && isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();

    llvm::LLVMContext &LLVMContext = getVMContext();
    llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
    bool NeedsPadding = false;
    bool InReg;
    if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
      unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
      SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32);
      llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
      if (InReg)
        return ABIArgInfo::getDirectInReg(Result);
      else
        return ABIArgInfo::getDirect(Result);
    }
    llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;

    // Expand small (<= 128-bit) record types when we know that the stack layout
    // of those arguments will match the struct. This is important because the
    // LLVM backend isn't smart enough to remove byval, which inhibits many
    // optimizations.
    // Don't do this for the MCU if there are still free integer registers
    // (see X86_64 ABI for full explanation).
    if (getContext().getTypeSize(Ty) <= 4 * 32 &&
        (!IsMCUABI || State.FreeRegs == 0) && canExpandIndirectArgument(Ty))
      return ABIArgInfo::getExpandWithPadding(
          State.CC == llvm::CallingConv::X86_FastCall ||
              State.CC == llvm::CallingConv::X86_VectorCall,
          PaddingType);

    return getIndirectResult(Ty, true, State);
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // On Darwin, some vectors are passed in memory, we handle this by passing
    // it as an i8/i16/i32/i64.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(Ty);
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));
    }

    if (IsX86_MMXType(CGT.ConvertType(Ty)))
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64));

    return ABIArgInfo::getDirect();
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  bool InReg = shouldPrimitiveUseInReg(Ty, State);

  if (Ty->isPromotableIntegerType()) {
    if (InReg)
      return ABIArgInfo::getExtendInReg();
    return ABIArgInfo::getExtend();
  }

  if (InReg)
    return ABIArgInfo::getDirectInReg();
  return ABIArgInfo::getDirect();
}

void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  CCState State(FI.getCallingConvention());
  if (IsMCUABI)
    State.FreeRegs = 3;
  else if (State.CC == llvm::CallingConv::X86_FastCall)
    State.FreeRegs = 2;
  else if (State.CC == llvm::CallingConv::X86_VectorCall) {
    State.FreeRegs = 2;
    State.FreeSSERegs = 6;
  } else if (FI.getHasRegParm())
    State.FreeRegs = FI.getRegParm();
  else
    State.FreeRegs = DefaultNumRegisterParameters;

  if (!getCXXABI().classifyReturnType(FI)) {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), State);
  } else if (FI.getReturnInfo().isIndirect()) {
    // The C++ ABI is not aware of register usage, so we have to check if the
    // return value was sret and put it in a register ourselves if appropriate.
    if (State.FreeRegs) {
      --State.FreeRegs; // The sret parameter consumes a register.
      if (!IsMCUABI)
        FI.getReturnInfo().setInReg(true);
    }
  }

  // The chain argument effectively gives us another free register.
  if (FI.isChainCall())
    ++State.FreeRegs;

  bool UsedInAlloca = false;
  for (auto &I : FI.arguments()) {
    I.info = classifyArgumentType(I.type, State);
    UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca);
  }

  // If we needed to use inalloca for any argument, do a second pass and rewrite
  // all the memory arguments to use inalloca.
  if (UsedInAlloca)
    rewriteWithInAlloca(FI);
}

void
X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
                                   CharUnits &StackOffset, ABIArgInfo &Info,
                                   QualType Type) const {
  // Arguments are always 4-byte-aligned.
  CharUnits FieldAlign = CharUnits::fromQuantity(4);

  assert(StackOffset.isMultipleOf(FieldAlign) && "unaligned inalloca struct");
  Info = ABIArgInfo::getInAlloca(FrameFields.size());
  FrameFields.push_back(CGT.ConvertTypeForMem(Type));
  StackOffset += getContext().getTypeSizeInChars(Type);

  // Insert padding bytes to respect alignment.
  CharUnits FieldEnd = StackOffset;
  StackOffset = FieldEnd.alignTo(FieldAlign);
  if (StackOffset != FieldEnd) {
    CharUnits NumBytes = StackOffset - FieldEnd;
    llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
    Ty = llvm::ArrayType::get(Ty, NumBytes.getQuantity());
    FrameFields.push_back(Ty);
  }
}
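
// For example: appending an i16 field at StackOffset 0 advances the offset
// to 2, then pads with a [2 x i8] array up to the next 4-byte boundary.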

static bool isArgInAlloca(const ABIArgInfo &Info) {
  // Leave ignored and inreg arguments alone.
  switch (Info.getKind()) {
  case ABIArgInfo::InAlloca:
    return true;
  case ABIArgInfo::Indirect:
    assert(Info.getIndirectByVal());
    return true;
  case ABIArgInfo::Ignore:
    return false;
  case ABIArgInfo::Direct:
  case ABIArgInfo::Extend:
    if (Info.getInReg())
      return false;
    return true;
  case ABIArgInfo::Expand:
  case ABIArgInfo::CoerceAndExpand:
    // These are aggregate types which are never passed in registers when
    // inalloca is involved.
    return true;
  }
  llvm_unreachable("invalid enum");
}

void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
  assert(IsWin32StructABI && "inalloca only supported on win32");

  // Build a packed struct type for all of the arguments in memory.
  SmallVector<llvm::Type *, 6> FrameFields;

  // The stack alignment is always 4.
  CharUnits StackAlign = CharUnits::fromQuantity(4);

  CharUnits StackOffset;
  CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end();

  // Put 'this' into the struct before 'sret', if necessary.
  bool IsThisCall =
      FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall;
  ABIArgInfo &Ret = FI.getReturnInfo();
  if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall &&
      isArgInAlloca(I->info)) {
    addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
    ++I;
  }

  // Put the sret parameter into the inalloca struct if it's in memory.
  if (Ret.isIndirect() && !Ret.getInReg()) {
    CanQualType PtrTy = getContext().getPointerType(FI.getReturnType());
    addFieldToArgStruct(FrameFields, StackOffset, Ret, PtrTy);
    // On Windows, the hidden sret parameter is always returned in eax.
    Ret.setInAllocaSRet(IsWin32StructABI);
  }

  // Skip the 'this' parameter in ecx.
  if (IsThisCall)
    ++I;

  // Put arguments passed in memory into the struct.
  for (; I != E; ++I) {
    if (isArgInAlloca(I->info))
      addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
  }

  FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
                                        /*isPacked=*/true),
                  StackAlign);
}

Address X86_32ABIInfo::EmitVAArg(CodeGenFunction &CGF,
                                 Address VAListAddr, QualType Ty) const {

  auto TypeInfo = getContext().getTypeInfoInChars(Ty);

  // x86-32 changes the alignment of certain arguments on the stack.
  //
  // Just messing with TypeInfo like this works because we never pass
  // anything indirectly.
  TypeInfo.second = CharUnits::fromQuantity(
      getTypeStackAlignInBytes(Ty, TypeInfo.second.getQuantity()));

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
                          TypeInfo, CharUnits::fromQuantity(4),
                          /*AllowHigherAlign*/ true);
}

bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
    const llvm::Triple &Triple, const CodeGenOptions &Opts) {
  assert(Triple.getArch() == llvm::Triple::x86);

  switch (Opts.getStructReturnConvention()) {
  case CodeGenOptions::SRCK_Default:
    break;
  case CodeGenOptions::SRCK_OnStack:  // -fpcc-struct-return
    return false;
  case CodeGenOptions::SRCK_InRegs:  // -freg-struct-return
    return true;
  }

  if (Triple.isOSDarwin() || Triple.isOSIAMCU())
    return true;

  switch (Triple.getOS()) {
  case llvm::Triple::DragonFly:
  case llvm::Triple::FreeBSD:
  case llvm::Triple::OpenBSD:
  case llvm::Triple::Bitrig:
  case llvm::Triple::Win32:
    return true;
  default:
    return false;
  }
}

void X86_32TargetCodeGenInfo::setTargetAttributes(const Decl *D,
                                                  llvm::GlobalValue *GV,
                                            CodeGen::CodeGenModule &CGM) const {
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
    if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
      // Get the LLVM function.
      llvm::Function *Fn = cast<llvm::Function>(GV);

      // Now add the 'alignstack' attribute with a value of 16.
      llvm::AttrBuilder B;
      B.addStackAlignmentAttr(16);
      Fn->addAttributes(llvm::AttributeSet::FunctionIndex,
                        llvm::AttributeSet::get(CGM.getLLVMContext(),
                                                llvm::AttributeSet::FunctionIndex,
                                                B));
    }
    if (FD->hasAttr<AnyX86InterruptAttr>()) {
      llvm::Function *Fn = cast<llvm::Function>(GV);
      Fn->setCallingConv(llvm::CallingConv::X86_INTR);
    }
  }
}

bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
    CodeGen::CodeGenFunction &CGF,
    llvm::Value *Address) const {
  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

  // 0-7 are the eight integer registers;  the order is different
  //   on Darwin (for EH), but the range is the same.
  // 8 is %eip.
  AssignToArrayRange(Builder, Address, Four8, 0, 8);

  if (CGF.CGM.getTarget().getTriple().isOSDarwin()) {
    // 12-16 are st(0..4).  Not sure why we stop at 4.
    // These have size 16, which is sizeof(long double) on
    // platforms with 8-byte alignment for that type.
    llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
    AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);
  } else {
    // 9 is %eflags, which doesn't get a size on Darwin for some
    // reason.
    Builder.CreateAlignedStore(
        Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9),
        CharUnits::One());

    // 11-16 are st(0..5).  Not sure why we stop at 5.
    // These have size 12, which is sizeof(long double) on
    // platforms with 4-byte alignment for that type.
    llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
    AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
  }

  return false;
}

//===----------------------------------------------------------------------===//
// X86-64 ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

/// The AVX ABI level for X86 targets.
enum class X86AVXABILevel {
  None,
  AVX,
  AVX512
};

/// \p returns the size in bits of the largest (native) vector for \p AVXLevel.
static unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) {
  switch (AVXLevel) {
  case X86AVXABILevel::AVX512:
    return 512;
  case X86AVXABILevel::AVX:
    return 256;
  case X86AVXABILevel::None:
    return 128;
  }
  llvm_unreachable("Unknown AVXLevel");
}

/// X86_64ABIInfo - The X86_64 ABI information.
class X86_64ABIInfo : public SwiftABIInfo {
  enum Class {
    Integer = 0,
    SSE,
    SSEUp,
    X87,
    X87Up,
    ComplexX87,
    NoClass,
    Memory
  };

  /// merge - Implement the X86_64 ABI merging algorithm.
  ///
  /// Merge an accumulating classification \arg Accum with a field
  /// classification \arg Field.
  ///
  /// \param Accum - The accumulating classification. This should
  /// always be either NoClass or the result of a previous merge
  /// call. In addition, this should never be Memory (the caller
  /// should just return Memory for the aggregate).
  static Class merge(Class Accum, Class Field);

  /// postMerge - Implement the X86_64 ABI post merging algorithm.
  ///
  /// Post merger cleanup, reduces a malformed Hi and Lo pair to
  /// final MEMORY or SSE classes when necessary.
  ///
  /// \param AggregateSize - The size of the current aggregate in
  /// the classification process.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the higher words of the containing object.
  void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;

  /// classify - Determine the x86_64 register classes in which the
  /// given type T should be passed.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the high word of the containing object.
  ///
  /// \param OffsetBase - The bit offset of this type in the
  /// containing object. Some parameters are classified differently
  /// depending on whether they straddle an eightbyte boundary.
  ///
  /// \param isNamedArg - Whether the argument in question is a "named"
  /// argument, as used in AMD64-ABI 3.5.7.
  ///
  /// If a word is unused its result will be NoClass; if a type should
  /// be passed in Memory then at least the classification of \arg Lo
  /// will be Memory.
  ///
  /// The \arg Lo class will be NoClass iff the argument is ignored.
  ///
  /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
  /// also be ComplexX87.
  void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
                bool isNamedArg) const;

  llvm::Type *GetByteVectorType(QualType Ty) const;
  llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
                                 unsigned IROffset, QualType SourceTy,
                                 unsigned SourceOffset) const;
  llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
                                     unsigned IROffset, QualType SourceTy,
                                     unsigned SourceOffset) const;

  /// getIndirectReturnResult - Given a source type \arg Ty, return a suitable
  /// result such that the value will be returned in memory.
  ABIArgInfo getIndirectReturnResult(QualType Ty) const;

  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ///
  /// \param freeIntRegs - The number of free integer registers remaining
  /// available.
  ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;

  ABIArgInfo classifyArgumentType(QualType Ty,
                                  unsigned freeIntRegs,
                                  unsigned &neededInt,
                                  unsigned &neededSSE,
                                  bool isNamedArg) const;

  bool IsIllegalVectorType(QualType Ty) const;

  /// The 0.98 ABI revision clarified a lot of ambiguities,
  /// unfortunately in ways that were not always consistent with
  /// certain previous compilers. In particular, platforms which
  /// required strict binary compatibility with older versions of GCC
  /// may need to exempt themselves.
  bool honorsRevision0_98() const {
    return !getTarget().getTriple().isOSDarwin();
  }

  /// GCC classifies <1 x long long> as SSE, but compatibility with older
  /// clang compilers requires us to classify it as INTEGER on some platforms.
  bool classifyIntegerMMXAsSSE() const {
    const llvm::Triple &Triple = getTarget().getTriple();
    if (Triple.isOSDarwin() || Triple.getOS() == llvm::Triple::PS4)
      return false;
    if (Triple.isOSFreeBSD() && Triple.getOSMajorVersion() >= 10)
      return false;
    return true;
  }

  X86AVXABILevel AVXLevel;
  // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on
  // 64-bit hardware.
  bool Has64BitPointers;

public:
  X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel) :
      SwiftABIInfo(CGT), AVXLevel(AVXLevel),
      Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
  }

  bool isPassedUsingAVXType(QualType type) const {
    unsigned neededInt, neededSSE;
    // The freeIntRegs argument doesn't matter here.
    ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE,
                                           /*isNamedArg*/true);
    if (info.isDirect()) {
      llvm::Type *ty = info.getCoerceToType();
      if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
        return (vectorTy->getBitWidth() > 128);
    }
    return false;
  }

  void computeInfo(CGFunctionInfo &FI) const override;

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
  Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                      QualType Ty) const override;

  bool has64BitPointers() const {
    return Has64BitPointers;
  }

  bool shouldPassIndirectlyForSwift(CharUnits totalSize,
                                    ArrayRef<llvm::Type*> scalars,
                                    bool asReturnValue) const override {
    return occupiesMoreThan(CGT, scalars, /*total*/ 4);
  }
};

/// WinX86_64ABIInfo - The Windows X86_64 ABI information.
class WinX86_64ABIInfo : public ABIInfo {
public:
  WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT)
      : ABIInfo(CGT),
        IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {}

  void computeInfo(CGFunctionInfo &FI) const override;

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorTypeForVectorCall(getContext(), Ty);
  }

  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t NumMembers) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorCallAggregateSmallEnough(NumMembers);
  }

private:
  ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs,
                      bool IsReturnType) const;

  bool IsMingw64;
};

class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
      : TargetCodeGenInfo(new X86_64ABIInfo(CGT, AVXLevel)) {}

  const X86_64ABIInfo &getABIInfo() const {
    return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override {
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);

    return false;
  }

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  bool isNoProtoCallVariadic(const CallArgList &args,
                             const FunctionNoProtoType *fnType) const override {
    // The default CC on x86-64 sets %al to the number of SSE
    // registers used, and GCC sets this when calling an unprototyped
    // function, so we override the default behavior. However, don't do
    // that when AVX types are involved: the ABI explicitly states it is
    // undefined, and it doesn't work in practice because of how the ABI
    // defines varargs anyway.
    if (fnType->getCallConv() == CC_C) {
      bool HasAVXType = false;
      for (CallArgList::const_iterator
             it = args.begin(), ie = args.end(); it != ie; ++it) {
        if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
          HasAVXType = true;
          break;
        }
      }

      if (!HasAVXType)
        return true;
    }

    return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
  }

  llvm::Constant *
  getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
    unsigned Sig;
    if (getABIInfo().has64BitPointers())
      Sig = (0xeb << 0) |  // jmp rel8
            (0x0a << 8) |  //   .+0x0c
            ('F' << 16) |
            ('T' << 24);
    else
      Sig = (0xeb << 0) |  // jmp rel8
            (0x06 << 8) |  //   .+0x08
            ('F' << 16) |
            ('T' << 24);
    return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
  }

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
      if (FD->hasAttr<AnyX86InterruptAttr>()) {
        llvm::Function *Fn = cast<llvm::Function>(GV);
        Fn->setCallingConv(llvm::CallingConv::X86_INTR);
      }
    }
  }
};

class PS4TargetCodeGenInfo : public X86_64TargetCodeGenInfo {
public:
  PS4TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
      : X86_64TargetCodeGenInfo(CGT, AVXLevel) {}

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "\01";
    // If the argument contains a space, enclose it in quotes.
    if (Lib.find(" ") != StringRef::npos)
      Opt += "\"" + Lib.str() + "\"";
    else
      Opt += Lib;
  }
};

static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
  // If the argument does not end in .lib, automatically add the suffix.
  // If the argument contains a space, enclose it in quotes.
  // This matches the behavior of MSVC.
  bool Quote = (Lib.find(" ") != StringRef::npos);
  std::string ArgStr = Quote ? "\"" : "";
  ArgStr += Lib;
  if (!Lib.endswith_lower(".lib"))
    ArgStr += ".lib";
  ArgStr += Quote ? "\"" : "";
  return ArgStr;
}

class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo {
public:
  WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
        bool DarwinVectorABI, bool RetSmallStructInRegABI, bool Win32StructABI,
        unsigned NumRegisterParameters)
    : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI,
        Win32StructABI, NumRegisterParameters, false) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:";
    Opt += qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name,
                               llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};

static void addStackProbeSizeTargetAttribute(const Decl *D,
                                             llvm::GlobalValue *GV,
                                             CodeGen::CodeGenModule &CGM) {
  if (D && isa<FunctionDecl>(D)) {
    if (CGM.getCodeGenOpts().StackProbeSize != 4096) {
      llvm::Function *Fn = cast<llvm::Function>(GV);

      Fn->addFnAttr("stack-probe-size",
                    llvm::utostr(CGM.getCodeGenOpts().StackProbeSize));
    }
  }
}

void WinX86_32TargetCodeGenInfo::setTargetAttributes(const Decl *D,
                                                     llvm::GlobalValue *GV,
                                            CodeGen::CodeGenModule &CGM) const {
  X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);

  addStackProbeSizeTargetAttribute(D, GV, CGM);
}

class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
                             X86AVXABILevel AVXLevel)
      : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {}

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override {
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);

    return false;
  }

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:";
    Opt += qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name,
                               llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};

void WinX86_64TargetCodeGenInfo::setTargetAttributes(const Decl *D,
                                                     llvm::GlobalValue *GV,
                                            CodeGen::CodeGenModule &CGM) const {
  TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);

  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
    if (FD->hasAttr<AnyX86InterruptAttr>()) {
      llvm::Function *Fn = cast<llvm::Function>(GV);
      Fn->setCallingConv(llvm::CallingConv::X86_INTR);
    }
  }

  addStackProbeSizeTargetAttribute(D, GV, CGM);
}

void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
                              Class &Hi) const {
  // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
  //
  // (a) If one of the classes is Memory, the whole argument is passed in
  //     memory.
  //
  // (b) If X87UP is not preceded by X87, the whole argument is passed in
  //     memory.
  //
  // (c) If the size of the aggregate exceeds two eightbytes and the first
  //     eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
  //     argument is passed in memory. NOTE: This is necessary to keep the
  //     ABI working for processors that don't support the __m256 type.
  //
  // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
  //
  // Some of these are enforced by the merging logic. Others can arise
  // only with unions; for example:
  //   union { _Complex double; unsigned; }
  //
  // Note that clauses (b) and (c) were added in 0.98.
  //
  if (Hi == Memory)
    Lo = Memory;
  if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
    Lo = Memory;
  if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
    Lo = Memory;
  if (Hi == SSEUp && Lo != SSE)
    Hi = SSE;
}
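
// Worked example (informal): for union { __m256 v; int i; } the vector
// member classifies as (SSE, SSEUp) but the int member merges the first
// eightbyte to INTEGER. Clause (c) then fires (size 256 > 128 with Lo !=
// SSE), so the whole union is passed in memory.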

X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
  // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
  // classified recursively so that always two fields are
  // considered. The resulting class is calculated according to
  // the classes of the fields in the eightbyte:
  //
  // (a) If both classes are equal, this is the resulting class.
  //
  // (b) If one of the classes is NO_CLASS, the resulting class is
  // the other class.
  //
  // (c) If one of the classes is MEMORY, the result is the MEMORY
  // class.
  //
  // (d) If one of the classes is INTEGER, the result is the
  // INTEGER.
  //
  // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
  // MEMORY is used as class.
  //
  // (f) Otherwise class SSE is used.

  // Accum should never be memory (we should have returned) or
  // ComplexX87 (because this cannot be passed in a structure).
  assert((Accum != Memory && Accum != ComplexX87) &&
         "Invalid accumulated classification during merge.");
  if (Accum == Field || Field == NoClass)
    return Accum;
  if (Field == Memory)
    return Memory;
  if (Accum == NoClass)
    return Field;
  if (Accum == Integer || Field == Integer)
    return Integer;
  if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
      Accum == X87 || Accum == X87Up)
    return Memory;
  return SSE;
}
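
// Worked example (informal): for struct { int a; float b; } both fields live
// in the first eightbyte. Merging NoClass with INTEGER gives INTEGER, and
// merging INTEGER with SSE gives INTEGER (rule (d)), so the whole struct is
// passed in a single general purpose register.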

void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
                             Class &Lo, Class &Hi, bool isNamedArg) const {
  // FIXME: This code can be simplified by introducing a simple value class for
  // Class pairs with appropriate constructor methods for the various
  // situations.

  // FIXME: Some of the split computations are wrong; unaligned vectors
  // shouldn't be passed in registers for example, so there is no chance they
  // can straddle an eightbyte. Verify & simplify.

  Lo = Hi = NoClass;

  Class &Current = OffsetBase < 64 ? Lo : Hi;
  Current = Memory;

  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    BuiltinType::Kind k = BT->getKind();

    if (k == BuiltinType::Void) {
      Current = NoClass;
    } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
      Lo = Integer;
      Hi = Integer;
    } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
      Current = Integer;
    } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
      Current = SSE;
    } else if (k == BuiltinType::LongDouble) {
      const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
      if (LDF == &llvm::APFloat::IEEEquad) {
        Lo = SSE;
        Hi = SSEUp;
      } else if (LDF == &llvm::APFloat::x87DoubleExtended) {
        Lo = X87;
        Hi = X87Up;
      } else if (LDF == &llvm::APFloat::IEEEdouble) {
        Current = SSE;
      } else
        llvm_unreachable("unexpected long double representation!");
    }
    // FIXME: _Decimal32 and _Decimal64 are SSE.
    // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
    return;
  }

  if (const EnumType *ET = Ty->getAs<EnumType>()) {
    // Classify the underlying integer type.
    classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
    return;
  }

  if (Ty->hasPointerRepresentation()) {
    Current = Integer;
    return;
  }

  if (Ty->isMemberPointerType()) {
    if (Ty->isMemberFunctionPointerType()) {
      if (Has64BitPointers) {
        // If Has64BitPointers, this is an {i64, i64}, so classify both
        // Lo and Hi now.
        Lo = Hi = Integer;
      } else {
        // Otherwise, with 32-bit pointers, this is an {i32, i32}. If that
        // straddles an eightbyte boundary, Hi should be classified as well.
        uint64_t EB_FuncPtr = (OffsetBase) / 64;
        uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;
        if (EB_FuncPtr != EB_ThisAdj) {
          Lo = Hi = Integer;
        } else {
          Current = Integer;
        }
      }
    } else {
      Current = Integer;
    }
    return;
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    uint64_t Size = getContext().getTypeSize(VT);
    if (Size == 1 || Size == 8 || Size == 16 || Size == 32) {
      // gcc passes the following as integer:
      // 4 bytes - <4 x char>, <2 x short>, <1 x int>, <1 x float>
      // 2 bytes - <2 x char>, <1 x short>
      // 1 byte  - <1 x char>
      Current = Integer;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      uint64_t EB_Lo = (OffsetBase) / 64;
      uint64_t EB_Hi = (OffsetBase + Size - 1) / 64;
      if (EB_Lo != EB_Hi)
        Hi = Lo;
    } else if (Size == 64) {
      QualType ElementType = VT->getElementType();

      // gcc passes <1 x double> in memory. :(
      if (ElementType->isSpecificBuiltinType(BuiltinType::Double))
        return;

      // gcc passes <1 x long long> as SSE but clang used to unconditionally
      // pass them as integer. For platforms where clang is the de facto
      // platform compiler, we must continue to use integer.
      if (!classifyIntegerMMXAsSSE() &&
          (ElementType->isSpecificBuiltinType(BuiltinType::LongLong) ||
           ElementType->isSpecificBuiltinType(BuiltinType::ULongLong) ||
           ElementType->isSpecificBuiltinType(BuiltinType::Long) ||
           ElementType->isSpecificBuiltinType(BuiltinType::ULong)))
        Current = Integer;
      else
        Current = SSE;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      if (OffsetBase && OffsetBase != 64)
        Hi = Lo;
    } else if (Size == 128 ||
               (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) {
      // Arguments of 256-bits are split into four eightbyte chunks. The
      // least significant one belongs to class SSE and all the others to class
      // SSEUP. The original Lo and Hi design considers that types can't be
      // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense.
      // This design isn't correct for 256-bits, but since there are no cases
      // where the upper parts would need to be inspected, avoid adding
      // complexity and just consider Hi to match the 64-256 part.
      //
      // Note that per 3.5.7 of AMD64-ABI, 256-bit args are only passed in
      // registers if they are "named", i.e. not part of the "..." of a
      // variadic function.
      //
      // Similarly, per 3.2.3. of the AVX512 draft, 512-bit ("named") args are
      // split into eight eightbyte chunks, one SSE and seven SSEUP.
      Lo = SSE;
      Hi = SSEUp;
    }
    return;
  }

  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    QualType ET = getContext().getCanonicalType(CT->getElementType());

    uint64_t Size = getContext().getTypeSize(Ty);
    if (ET->isIntegralOrEnumerationType()) {
      if (Size <= 64)
        Current = Integer;
      else if (Size <= 128)
        Lo = Hi = Integer;
    } else if (ET == getContext().FloatTy) {
      Current = SSE;
    } else if (ET == getContext().DoubleTy) {
      Lo = Hi = SSE;
    } else if (ET == getContext().LongDoubleTy) {
      const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
      if (LDF == &llvm::APFloat::IEEEquad)
        Current = Memory;
      else if (LDF == &llvm::APFloat::x87DoubleExtended)
        Current = ComplexX87;
      else if (LDF == &llvm::APFloat::IEEEdouble)
        Lo = Hi = SSE;
      else
        llvm_unreachable("unexpected long double representation!");
    }

    // If this complex type crosses an eightbyte boundary then it
    // should be split.
    uint64_t EB_Real = (OffsetBase) / 64;
    uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
    if (Hi == NoClass && EB_Real != EB_Imag)
      Hi = Lo;

    return;
  }

  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    // Arrays are treated like structures.

    uint64_t Size = getContext().getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than four eightbytes, ..., it has class MEMORY.
    if (Size > 256)
      return;

    // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
    // fields, it has class MEMORY.
    //
    // Only need to check alignment of array base.
    if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
      return;

    // Otherwise implement simplified merge. We could be smarter about
    // this, but it isn't worth it and would be harder to verify.
    Current = NoClass;
    uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
    uint64_t ArraySize = AT->getSize().getZExtValue();

    // The only case a 256-bit wide vector could be used is when the array
    // contains a single 256-bit element. Since Lo and Hi logic isn't extended
    // to work for sizes wider than 128, early check and fallback to memory.
    if (Size > 128 && EltSize != 256)
      return;

    for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
      Class FieldLo, FieldHi;
      classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    postMerge(Size, Lo, Hi);
    assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
    return;
  }
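
  // Worked example (informal): a struct containing double d[2] classifies
  // each element as SSE, giving (Lo, Hi) = (SSE, SSE); the aggregate is then
  // passed in two XMM registers, one per eightbyte.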

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    uint64_t Size = getContext().getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than four eightbytes, ..., it has class MEMORY.
    if (Size > 256)
      return;

    // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
    // copy constructor or a non-trivial destructor, it is passed by invisible
    // reference.
    if (getRecordArgABI(RT, getCXXABI()))
      return;

    const RecordDecl *RD = RT->getDecl();

    // Assume variable sized types are passed in memory.
    if (RD->hasFlexibleArrayMember())
      return;

    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);

    // Reset Lo class, this will be recomputed.
    Current = NoClass;

    // If this is a C++ record, classify the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &I : CXXRD->bases()) {
        assert(!I.isVirtual() && !I.getType()->isDependentType() &&
               "Unexpected base class!");
        const CXXRecordDecl *Base =
            cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());

        // Classify this field.
        //
        // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
        // single eightbyte, each is classified separately. Each eightbyte gets
        // initialized to class NO_CLASS.
        Class FieldLo, FieldHi;
        uint64_t Offset =
            OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
        classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg);
        Lo = merge(Lo, FieldLo);
        Hi = merge(Hi, FieldHi);
        if (Lo == Memory || Hi == Memory) {
          postMerge(Size, Lo, Hi);
          return;
        }
      }
    }

    // Classify the fields one at a time, merging the results.
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i, ++idx) {
      uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
      bool BitField = i->isBitField();

      // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
      // four eightbytes, or it contains unaligned fields, it has class MEMORY.
      //
      // The only case a 256-bit wide vector could be used is when the struct
      // contains a single 256-bit element. Since Lo and Hi logic isn't
      // extended to work for sizes wider than 128, early check and fallback
      // to memory.
      if (Size > 128 && getContext().getTypeSize(i->getType()) != 256) {
        Lo = Memory;
        postMerge(Size, Lo, Hi);
        return;
      }
      // Note, skip this test for bit-fields, see below.
      if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
        Lo = Memory;
        postMerge(Size, Lo, Hi);
        return;
      }

      // Classify this field.
      //
      // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
      // exceeds a single eightbyte, each is classified
      // separately. Each eightbyte gets initialized to class
      // NO_CLASS.
      Class FieldLo, FieldHi;

      // Bit-fields require special handling, they do not force the
      // structure to be passed in memory even if unaligned, and
      // therefore they can straddle an eightbyte.
      if (BitField) {
        // Ignore padding bit-fields.
        if (i->isUnnamedBitfield())
          continue;

        uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
        uint64_t Size = i->getBitWidthValue(getContext());

        uint64_t EB_Lo = Offset / 64;
        uint64_t EB_Hi = (Offset + Size - 1) / 64;

        if (EB_Lo) {
          assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
          FieldLo = NoClass;
          FieldHi = Integer;
        } else {
          FieldLo = Integer;
          FieldHi = EB_Hi ? Integer : NoClass;
        }
      } else
        classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    postMerge(Size, Lo, Hi);
  }
}
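
// Worked example (informal): struct { long x; double y; } classifies as
// (INTEGER, SSE): x fills the first eightbyte and y the second, so the
// struct travels in one GPR plus one XMM register.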

ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the right
  // place naturally.
  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  return getNaturalAlignIndirect(Ty);
}

bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
  if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
    uint64_t Size = getContext().getTypeSize(VecTy);
    unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel);
    if (Size <= 64 || Size > LargestVector)
      return true;
  }

  return false;
}
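
// For instance (informal): with AVXLevel == None the largest legal vector is
// 128 bits, so a 256-bit <8 x float> argument counts as "illegal" here and
// is passed in memory via getIndirectResult.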

ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
                                            unsigned freeIntRegs) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the right
  // place naturally.
  //
  // This assumption is optimistic, as there could be free registers available
  // when we need to pass this argument in memory, and LLVM could try to pass
  // the argument in the free register. This does not seem to happen currently,
  // but this code would be much safer if we could mark the argument with
  // 'onstack'. See PR12193.
  if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
    return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

  // Compute the byval alignment. We specify the alignment of the byval in all
  // cases so that the mid-level optimizer knows the alignment of the byval.
  unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);

  // Attempt to avoid passing indirect results using byval when possible. This
  // is important for good codegen.
  //
  // We do this by coercing the value into a scalar type which the backend can
  // handle naturally (i.e., without using byval).
  //
  // For simplicity, we currently only do this when we have exhausted all of the
  // free integer registers. Doing this when there are free integer registers
  // would require more care, as we would have to ensure that the coerced value
  // did not claim the unused register. That would require either reordering the
  // arguments to the function (so that any subsequent inreg values came first),
  // or only doing this optimization when there were no following arguments that
  // might be inreg.
  //
  // We currently expect it to be rare (particularly in well written code) for
  // arguments to be passed on the stack when there are still free integer
  // registers available (this would typically imply large structs being passed
  // by value), so this seems like a fair tradeoff for now.
  //
  // We can revisit this if the backend grows support for 'onstack' parameter
  // attributes. See PR12193.
  if (freeIntRegs == 0) {
    uint64_t Size = getContext().getTypeSize(Ty);

    // If this type fits in an eightbyte, coerce it into the matching integral
    // type, which will end up on the stack (with alignment 8).
    if (Align == 8 && Size <= 64)
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                          Size));
  }

  return ABIArgInfo::getIndirect(CharUnits::fromQuantity(Align));
}

/// The ABI specifies that a value should be passed in a full vector XMM/YMM
/// register. Pick an LLVM IR type that will be passed as a vector register.
llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
  // Wrapper structs/arrays that only contain vectors are passed just like
  // vectors; strip them off if present.
  if (const Type *InnerTy = isSingleElementStruct(Ty, getContext()))
    Ty = QualType(InnerTy, 0);

  llvm::Type *IRType = CGT.ConvertType(Ty);
  if (isa<llvm::VectorType>(IRType) ||
      IRType->getTypeID() == llvm::Type::FP128TyID)
    return IRType;

  // We couldn't find the preferred IR vector type for 'Ty'.
  uint64_t Size = getContext().getTypeSize(Ty);
  assert((Size == 128 || Size == 256) && "Invalid type found!");

  // Return an LLVM IR vector type based on the size of 'Ty'.
  return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()),
                               Size / 64);
}

/// BitsContainNoUserData - Return true if the specified [start,end) bit range
/// is known to either be off the end of the specified type or being in
/// alignment padding. The user type specified is known to be at most 128 bits
/// in size, and have passed through X86_64ABIInfo::classify with a successful
/// classification that put one of the two halves in the INTEGER class.
///
/// It is conservatively correct to return false.
static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
                                  unsigned EndBit, ASTContext &Context) {
  // If the bytes being queried are off the end of the type, there is no user
  // data hiding here. This handles analysis of builtins, vectors and other
  // types that don't contain interesting padding.
  unsigned TySize = (unsigned)Context.getTypeSize(Ty);
  if (TySize <= StartBit)
    return true;

  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
    unsigned NumElts = (unsigned)AT->getSize().getZExtValue();

    // Check each element to see if the element overlaps with the queried range.
    for (unsigned i = 0; i != NumElts; ++i) {
      // If the element is after the span we care about, then we're done.
      unsigned EltOffset = i*EltSize;
      if (EltOffset >= EndBit) break;

      unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset : 0;
      if (!BitsContainNoUserData(AT->getElementType(), EltStart,
                                 EndBit-EltOffset, Context))
        return false;
    }
    // If it overlaps no elements, then it is safe to process as padding.
    return true;
  }

  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

    // If this is a C++ record, check the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &I : CXXRD->bases()) {
        assert(!I.isVirtual() && !I.getType()->isDependentType() &&
               "Unexpected base class!");
        const CXXRecordDecl *Base =
            cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());

        // If the base is after the span we care about, ignore it.
        unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
        if (BaseOffset >= EndBit) continue;

        unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset : 0;
        if (!BitsContainNoUserData(I.getType(), BaseStart,
                                   EndBit-BaseOffset, Context))
          return false;
      }
    }

    // Verify that no field has data that overlaps the region of interest. Yes
    // this could be sped up a lot by being smarter about queried fields,
    // however we're only looking at structs up to 16 bytes, so we don't care
    // much.
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
         i != e; ++i, ++idx) {
      unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);

      // If we found a field after the region we care about, then we're done.
      if (FieldOffset >= EndBit) break;

      unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset : 0;
      if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset,
                                 Context))
        return false;
    }

    // If nothing in this record overlapped the area of interest, then we're
    // clean.
    return true;
  }

  return false;
}
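
// For example (informal): in struct { double d; int i; } the bit range
// [96, 128) is tail padding, so BitsContainNoUserData returns true for it;
// that is what lets the second eightbyte be passed as a plain i32.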

/// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a
/// float member at the specified offset. For example, {int,{float}} has a
/// float at offset 4. It is conservatively correct for this routine to return
/// false.
static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset,
                                  const llvm::DataLayout &TD) {
  // Base case if we find a float.
  if (IROffset == 0 && IRType->isFloatTy())
    return true;

  // If this is a struct, recurse into the field at the specified offset.
  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
    const llvm::StructLayout *SL = TD.getStructLayout(STy);
    unsigned Elt = SL->getElementContainingOffset(IROffset);
    IROffset -= SL->getElementOffset(Elt);
    return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD);
  }

  // If this is an array, recurse into the element at the specified offset.
  if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
    llvm::Type *EltTy = ATy->getElementType();
    unsigned EltSize = TD.getTypeAllocSize(EltTy);
    IROffset -= IROffset/EltSize*EltSize;
    return ContainsFloatAtOffset(EltTy, IROffset, TD);
  }

  return false;
}

/// GetSSETypeAtOffset - Return a type that will be passed by the backend in the
/// low 8 bytes of an XMM register, corresponding to the SSE class.
llvm::Type *X86_64ABIInfo::
GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
                   QualType SourceTy, unsigned SourceOffset) const {
  // We have only three choices: double, <2 x float>, or float. We pass as
  // float if the last 4 bytes are just padding. This happens for structs
  // that contain 3 floats.
  if (BitsContainNoUserData(SourceTy, SourceOffset*8+32,
                            SourceOffset*8+64, getContext()))
    return llvm::Type::getFloatTy(getVMContext());

  // We want to pass as <2 x float> if the LLVM IR type contains a float at
  // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the
  // case.
  if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) &&
      ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout()))
    return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2);

  return llvm::Type::getDoubleTy(getVMContext());
}
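
// Worked example (informal): for struct { float a, b, c; } the first
// eightbyte holds two floats and yields <2 x float>; the second eightbyte
// holds one float followed by padding and yields float, so the struct is
// passed as { <2 x float>, float }.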

/// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in
/// an 8-byte GPR. This means that we either have a scalar or we are talking
/// about the high or low part of an up-to-16-byte struct. This routine picks
/// the best LLVM IR type to represent this, which may be i64 or may be anything
/// else that the backend will pass in a GPR that works better (e.g. i8, %foo*,
/// etc).
///
/// PrefType is an LLVM IR type that corresponds to (part of) the IR type for
/// the source type. IROffset is an offset in bytes into the LLVM IR type that
/// the 8-byte value references. PrefType may be null.
///
/// SourceTy is the source-level type for the entire argument. SourceOffset is
/// an offset into this that we're processing (which is always either 0 or 8).
///
llvm::Type *X86_64ABIInfo::
GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
                       QualType SourceTy, unsigned SourceOffset) const {
  // If we're dealing with an un-offset LLVM IR type, then it means that we're
  // returning an 8-byte unit starting with it. See if we can safely use it.
  if (IROffset == 0) {
    // Pointers and int64's always fill the 8-byte unit.
    if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
        IRType->isIntegerTy(64))
      return IRType;

    // If we have a 1/2/4-byte integer, we can use it only if the rest of the
    // goodness in the source type is just tail padding. This is allowed to
    // kick in for struct {double,int} on the int, but not on
    // struct{double,int,int} because we wouldn't return the second int. We
    // have to do this analysis on the source type because we can't depend on
    // unions being lowered a specific way etc.
    if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
        IRType->isIntegerTy(32) ||
        (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
      unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
          cast<llvm::IntegerType>(IRType)->getBitWidth();

      if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
                                SourceOffset*8+64, getContext()))
        return IRType;
    }
  }

  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
    // If this is a struct, recurse into the field at the specified offset.
    const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
    if (IROffset < SL->getSizeInBytes()) {
      unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
      IROffset -= SL->getElementOffset(FieldIdx);

      return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
                                    SourceTy, SourceOffset);
    }
  }

  if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
    llvm::Type *EltTy = ATy->getElementType();
    unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
    unsigned EltOffset = IROffset/EltSize*EltSize;
    return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
                                  SourceOffset);
  }

  // Okay, we don't have any better idea of what to pass, so we pass this in an
  // integer register that isn't too big to fit the rest of the struct.
  unsigned TySizeInBytes =
      (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();

  assert(TySizeInBytes != SourceOffset && "Empty field?");

  // It is always safe to classify this as an integer type up to i64 that
  // isn't larger than the structure.
  return llvm::IntegerType::get(getVMContext(),
                                std::min(TySizeInBytes-SourceOffset, 8U)*8);
}
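
// Worked example (informal): for struct { double d; int i; } the second
// eightbyte starts at the i32 field and the remaining bits [96, 128) are
// padding, so this returns i32 rather than i64 for that half.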

/// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
/// be used as elements of a two register pair to pass or return, return a
/// first class aggregate to represent them. For example, if the low part of
/// a by-value argument should be passed as i32* and the high part as float,
/// return {i32*, float}.
static llvm::Type *
GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
                           const llvm::DataLayout &TD) {
  // In order to correctly satisfy the ABI, we need the high part to start
  // at offset 8. If the high and low parts we inferred are both 4-byte types
  // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
  // the second element at offset 8. Check for this:
  unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
  unsigned HiAlign = TD.getABITypeAlignment(Hi);
  unsigned HiStart = llvm::alignTo(LoSize, HiAlign);
  assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");

  // To handle this, we have to increase the size of the low part so that the
  // second element will start at an 8 byte offset. We can't increase the size
  // of the second element because it might make us access off the end of the
  // struct.
  if (HiStart != 8) {
    // There are usually two sorts of types the ABI generation code can produce
    // for the low part of a pair that aren't 8 bytes in size: float or
    // i8/i16/i32. This can also include pointers when they are 32-bit (X32 and
    // NaCl).
    // Promote these to a larger type.
    if (Lo->isFloatTy())
      Lo = llvm::Type::getDoubleTy(Lo->getContext());
    else {
      assert((Lo->isIntegerTy() || Lo->isPointerTy())
             && "Invalid/unknown lo type");
      Lo = llvm::Type::getInt64Ty(Lo->getContext());
    }
  }

  llvm::StructType *Result = llvm::StructType::get(Lo, Hi, nullptr);

  // Verify that the second element is at an 8-byte offset.
  assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
         "Invalid x86-64 argument pair!");
  return Result;
}
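
// For instance (informal): if both inferred halves are float, {float, float}
// would place the second element at offset 4, so the low half is widened to
// double and the pair becomes {double, float}, keeping the high part at
// offset 8 as the ABI requires.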

ABIArgInfo X86_64ABIInfo::
classifyReturnType(QualType RetTy) const {
  // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
  // classification algorithm.
  X86_64ABIInfo::Class Lo, Hi;
  classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true);

  // Check some invariants.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  llvm::Type *ResType = nullptr;
  switch (Lo) {
  case NoClass:
    if (Hi == NoClass)
      return ABIArgInfo::getIgnore();
    // If the low part is just padding, it takes no register, leave ResType
    // null.
    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
           "Unknown missing lo part");
    break;

  case SSEUp:
  case X87Up:
    llvm_unreachable("Invalid classification for lo word.");

    // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
    // hidden argument.
  case Memory:
    return getIndirectReturnResult(RetTy);

    // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
    // available register of the sequence %rax, %rdx is used.
  case Integer:
    ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);

    // If we have a sign or zero extended integer, make sure to return Extend
    // so that the parameter gets the right LLVM IR attributes.
    if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
      // Treat an enum type as its underlying type.
      if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
        RetTy = EnumTy->getDecl()->getIntegerType();

      if (RetTy->isIntegralOrEnumerationType() &&
          RetTy->isPromotableIntegerType())
        return ABIArgInfo::getExtend();
    }
    break;

    // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
    // available SSE register of the sequence %xmm0, %xmm1 is used.
  case SSE:
    ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
    break;

    // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
    // returned on the X87 stack in %st0 as 80-bit x87 number.
  case X87:
    ResType = llvm::Type::getX86_FP80Ty(getVMContext());
    break;

    // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
    // part of the value is returned in %st0 and the imaginary part in
    // %st1.
  case ComplexX87:
    assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
    ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
                                    llvm::Type::getX86_FP80Ty(getVMContext()),
                                    nullptr);
    break;
  }

  llvm::Type *HighPart = nullptr;
  switch (Hi) {
    // Memory was handled previously and X87 should
    // never occur as a hi class.
  case Memory:
  case X87:
    llvm_unreachable("Invalid classification for hi word.");

  case ComplexX87: // Previously handled.
  case NoClass:
    break;

  case Integer:
    HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
    if (Lo == NoClass)  // Return HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;
  case SSE:
    HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
    if (Lo == NoClass)  // Return HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;

    // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
    // is returned in the next available eightbyte chunk of the last used
    // vector register.
    //
    // SSEUP should always be preceded by SSE, just widen.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification.");
    ResType = GetByteVectorType(RetTy);
    break;

    // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
    // returned together with the previous X87 value in %st0.
  case X87Up:
    // If X87Up is preceded by X87, we don't need to do
    // anything. However, in some cases with unions it may not be
    // preceded by X87. In such situations we follow gcc and pass the
    // extra bits in an SSE reg.
    if (Lo != X87) {
      HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
      if (Lo == NoClass)  // Return HighPart at offset 8 in memory.
        return ABIArgInfo::getDirect(HighPart, 8);
    }
    break;
  }

  // If a high part was specified, merge it together with the low part. It is
  // known to pass in the high eightbyte of the result. We do this by forming a
  // first class struct aggregate with the high and low part: {low, high}
  if (HighPart)
    ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());

  return ABIArgInfo::getDirect(ResType);
}
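
// Worked example (informal): returning struct { long x; double y; } yields
// Lo = INTEGER and Hi = SSE, so the result is coerced to {i64, double} and
// comes back in %rax and %xmm0.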

ABIArgInfo X86_64ABIInfo::classifyArgumentType(
    QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE,
    bool isNamedArg) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  X86_64ABIInfo::Class Lo, Hi;
  classify(Ty, 0, Lo, Hi, isNamedArg);

  // Check some invariants.
  // FIXME: Enforce these by construction.
  assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
  assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");

  neededInt = 0;
  neededSSE = 0;
  llvm::Type *ResType = nullptr;
  switch (Lo) {
  case NoClass:
    if (Hi == NoClass)
      return ABIArgInfo::getIgnore();
    // If the low part is just padding, it takes no register, leave ResType
    // null.
    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
           "Unknown missing lo part");
    break;

    // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
    // on the stack.
  case Memory:

    // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
    // COMPLEX_X87, it is passed in memory.
  case X87:
  case ComplexX87:
    if (getRecordArgABI(Ty, getCXXABI()) == CGCXXABI::RAA_Indirect)
      ++neededInt;
    return getIndirectResult(Ty, freeIntRegs);

  case SSEUp:
  case X87Up:
    llvm_unreachable("Invalid classification for lo word.");

    // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
    // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
    // and %r9 is used.
  case Integer:
    ++neededInt;

    // Pick an 8-byte type based on the preferred type.
    ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);

    // If we have a sign or zero extended integer, make sure to return Extend
    // so that the parameter gets the right LLVM IR attributes.
    if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
      // Treat an enum type as its underlying type.
      if (const EnumType *EnumTy = Ty->getAs<EnumType>())
        Ty = EnumTy->getDecl()->getIntegerType();

      if (Ty->isIntegralOrEnumerationType() &&
          Ty->isPromotableIntegerType())
        return ABIArgInfo::getExtend();
    }

    break;

    // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
    // available SSE register is used, the registers are taken in the
    // order from %xmm0 to %xmm7.
  case SSE: {
    llvm::Type *IRType = CGT.ConvertType(Ty);
    ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
    ++neededSSE;
    break;
  }
  }

  llvm::Type *HighPart = nullptr;
  switch (Hi) {
    // Memory was handled previously, ComplexX87 and X87 should
    // never occur as hi classes, and X87Up must be preceded by X87,
    // which is passed in memory.
  case Memory:
  case X87:
  case ComplexX87:
    llvm_unreachable("Invalid classification for hi word.");

  case NoClass: break;

  case Integer:
    ++neededInt;
    // Pick an 8-byte type based on the preferred type.
    HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);

    if (Lo == NoClass)  // Pass HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;

    // X87Up generally doesn't occur here (long double is passed in
    // memory), except in situations involving unions.
  case X87Up:
  case SSE:
    HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);

    if (Lo == NoClass)  // Pass HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);

    ++neededSSE;
    break;

    // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
    // eightbyte is passed in the upper half of the last used SSE
    // register. This only happens when 128-bit vectors are passed.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification");
    ResType = GetByteVectorType(Ty);
    break;
  }

  // If a high part was specified, merge it together with the low part. It is
  // known to pass in the high eightbyte of the result. We do this by forming a
  // first class struct aggregate with the high and low part: {low, high}
  if (HighPart)
    ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());

  return ABIArgInfo::getDirect(ResType);
}
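
// Worked example (informal): struct { int a, b; float c, d; } classifies as
// (INTEGER, SSE) and is coerced to { i64, <2 x float> }, consuming one GPR
// (neededInt = 1) and one XMM register (neededSSE = 1).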

void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {

  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

  // Keep track of the number of assigned registers.
  unsigned freeIntRegs = 6, freeSSERegs = 8;

  // If the return value is indirect, then the hidden argument is consuming one
  // integer register.
  if (FI.getReturnInfo().isIndirect())
    --freeIntRegs;

  // The chain argument effectively gives us another free register.
  if (FI.isChainCall())
    ++freeIntRegs;

  unsigned NumRequiredArgs = FI.getNumRequiredArgs();
  // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
  // get assigned (in left-to-right order) for passing as follows...
  unsigned ArgNo = 0;
  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it, ++ArgNo) {
    bool IsNamedArg = ArgNo < NumRequiredArgs;

    unsigned neededInt, neededSSE;
    it->info = classifyArgumentType(it->type, freeIntRegs, neededInt,
                                    neededSSE, IsNamedArg);

    // AMD64-ABI 3.2.3p3: If there are no registers available for any
    // eightbyte of an argument, the whole argument is passed on the
    // stack. If registers have already been assigned for some
    // eightbytes of such an argument, the assignments get reverted.
    if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) {
      freeIntRegs -= neededInt;
      freeSSERegs -= neededSSE;
    } else {
      it->info = getIndirectResult(it->type, freeIntRegs);
    }
  }
}
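
// Note (informal): the revert-to-memory rule is all-or-nothing per argument;
// e.g. once only one GPR remains, a struct needing two GPRs goes entirely on
// the stack even if plenty of XMM registers are still free.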

static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF,
                                         Address VAListAddr, QualType Ty) {
  Address overflow_arg_area_p = CGF.Builder.CreateStructGEP(
      VAListAddr, 2, CharUnits::fromQuantity(8), "overflow_arg_area_p");
  llvm::Value *overflow_arg_area =
      CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");

  // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
  // byte boundary if alignment needed by type exceeds 8 byte boundary.
  // It isn't stated explicitly in the standard, but in practice we use
  // alignment greater than 16 where necessary.
  CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
  if (Align > CharUnits::fromQuantity(8)) {
    overflow_arg_area = emitRoundPointerUpToAlignment(CGF, overflow_arg_area,
                                                      Align);
  }

  // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
  llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *Res =
      CGF.Builder.CreateBitCast(overflow_arg_area,
                                llvm::PointerType::getUnqual(LTy));

  // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
  // l->overflow_arg_area + sizeof(type).
  // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
  // an 8 byte boundary.

  uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
  llvm::Value *Offset =
      llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
  overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
                                            "overflow_arg_area.next");
  CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);

  // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
  return Address(Res, Align);
}
3331 Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
3332 QualType Ty) const {
3333 // Assume that va_list type is correct; should be pointer to LLVM type:
3337 // i8* overflow_arg_area;
3338 // i8* reg_save_area;
3340 unsigned neededInt, neededSSE;
3342 Ty = getContext().getCanonicalType(Ty);
3343 ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE,
3344 /*isNamedArg*/false);
3346 // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
3347 // in the registers. If not go to step 7.
3348 if (!neededInt && !neededSSE)
3349 return EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);
3351 // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
3352 // general purpose registers needed to pass type and num_fp to hold
3353 // the number of floating point registers needed.
3355 // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
3356 // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
3357 // l->fp_offset > 304 - num_fp * 16 go to step 7.
3359 // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of
3360 // register save space).
3362 llvm::Value *InRegs = nullptr;
3363 Address gp_offset_p = Address::invalid(), fp_offset_p = Address::invalid();
3364 llvm::Value *gp_offset = nullptr, *fp_offset = nullptr;
3367 CGF.Builder.CreateStructGEP(VAListAddr, 0, CharUnits::Zero(),
3369 gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
3370 InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
3371 InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
3376 CGF.Builder.CreateStructGEP(VAListAddr, 1, CharUnits::fromQuantity(4),
3378 fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
3379 llvm::Value *FitsInFP =
3380 llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
3381 FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
3382 InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
3385 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
3386 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
3387 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
3388 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
3390 // Emit code to load the value if it was passed in registers.
3392 CGF.EmitBlock(InRegBlock);
3394 // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
3395 // an offset of l->gp_offset and/or l->fp_offset. This may require
3396 // copying to a temporary location in case the parameter is passed
3397 // in different register classes or requires an alignment greater
3398 // than 8 for general purpose registers and 16 for XMM registers.
3400 // FIXME: This really results in shameful code when we end up needing to
3401 // collect arguments from different places; often what should result in a
3402 // simple assembling of a structure from scattered addresses has many more
3403 // loads than necessary. Can we clean this up?
3404 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
3405 llvm::Value *RegSaveArea = CGF.Builder.CreateLoad(
3406 CGF.Builder.CreateStructGEP(VAListAddr, 3, CharUnits::fromQuantity(16)),
3409 Address RegAddr = Address::invalid();
3410 if (neededInt && neededSSE) {
3412 assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
3413 llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
3414 Address Tmp = CGF.CreateMemTemp(Ty);
3415 Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST);
3416 assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
3417 llvm::Type *TyLo = ST->getElementType(0);
3418 llvm::Type *TyHi = ST->getElementType(1);
3419 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
3420 "Unexpected ABI info for mixed regs");
3421 llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
3422 llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
3423 llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegSaveArea, gp_offset);
3424 llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegSaveArea, fp_offset);
3425 llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
3426 llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
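// Illustrative case (editorial): "struct { int i; double d; }" is
// classified INTEGER + SSE and coerced to roughly { i32, double }; TyLo
// then comes from the GPR area at gp_offset and TyHi from the XMM area at
// fp_offset, and the two pieces are reassembled contiguously in Tmp below.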
3428 // Copy the first element.
3429 llvm::Value *V =
3430 CGF.Builder.CreateDefaultAlignedLoad(
3431 CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
3432 CGF.Builder.CreateStore(V,
3433 CGF.Builder.CreateStructGEP(Tmp, 0, CharUnits::Zero()));
3435 // Copy the second element.
3436 V = CGF.Builder.CreateDefaultAlignedLoad(
3437 CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
3438 CharUnits Offset = CharUnits::fromQuantity(
3439 getDataLayout().getStructLayout(ST)->getElementOffset(1));
3440 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1, Offset));
3442 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
3443 } else if (neededInt) {
3444 RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, gp_offset),
3445 CharUnits::fromQuantity(8));
3446 RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);
3448 // Copy to a temporary if necessary to ensure the appropriate alignment.
3449 std::pair<CharUnits, CharUnits> SizeAlign =
3450 getContext().getTypeInfoInChars(Ty);
3451 uint64_t TySize = SizeAlign.first.getQuantity();
3452 CharUnits TyAlign = SizeAlign.second;
3454 // Copy into a temporary if the type is more aligned than the
3455 // register save area.
3456 if (TyAlign.getQuantity() > 8) {
3457 Address Tmp = CGF.CreateMemTemp(Ty);
3458 CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, false);
3459 RegAddr = Tmp;
3460 }
3462 } else if (neededSSE == 1) {
3463 RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset),
3464 CharUnits::fromQuantity(16));
3465 RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);
3466 } else {
3467 assert(neededSSE == 2 && "Invalid number of needed registers!");
3468 // SSE registers are spaced 16 bytes apart in the register save
3469 // area, we need to collect the two eightbytes together.
3470 // The ABI isn't explicit about this, but it seems reasonable
3471 // to assume that the slots are 16-byte aligned, since the stack is
3472 // naturally 16-byte aligned and the prologue is expected to store
3473 // all the SSE registers to the RSA.
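// Sketch of the layout assumed here (editorial): for a value needing two
// SSE registers, e.g. "struct { double x, y; }", the eightbytes are read
// from reg_save_area + fp_offset and reg_save_area + fp_offset + 16, not
// +8, because the prologue spills full 16-byte XMM slots.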
3474 Address RegAddrLo = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset),
3475 CharUnits::fromQuantity(16));
3476 Address RegAddrHi =
3477 CGF.Builder.CreateConstInBoundsByteGEP(RegAddrLo,
3478 CharUnits::fromQuantity(16));
3479 llvm::Type *DoubleTy = CGF.DoubleTy;
3480 llvm::StructType *ST = llvm::StructType::get(DoubleTy, DoubleTy, nullptr);
3481 llvm::Value *V;
3482 Address Tmp = CGF.CreateMemTemp(Ty);
3483 Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST);
3484 V = CGF.Builder.CreateLoad(
3485 CGF.Builder.CreateElementBitCast(RegAddrLo, DoubleTy));
3486 CGF.Builder.CreateStore(V,
3487 CGF.Builder.CreateStructGEP(Tmp, 0, CharUnits::Zero()));
3488 V = CGF.Builder.CreateLoad(
3489 CGF.Builder.CreateElementBitCast(RegAddrHi, DoubleTy));
3490 CGF.Builder.CreateStore(V,
3491 CGF.Builder.CreateStructGEP(Tmp, 1, CharUnits::fromQuantity(8)));
3493 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
3494 }
3496 // AMD64-ABI 3.5.7p5: Step 5. Set:
3497 // l->gp_offset = l->gp_offset + num_gp * 8
3498 // l->fp_offset = l->fp_offset + num_fp * 16.
3499 if (neededInt) {
3500 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
3501 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
3502 gp_offset_p);
3503 }
3504 if (neededSSE) {
3505 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
3506 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
3507 fp_offset_p);
3508 }
3509 CGF.EmitBranch(ContBlock);
3511 // Emit code to load the value if it was passed in memory.
3513 CGF.EmitBlock(InMemBlock);
3514 Address MemAddr = EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);
3516 // Return the appropriate result.
3518 CGF.EmitBlock(ContBlock);
3519 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, MemAddr, InMemBlock,
3520 "vaarg.addr");
3521 return ResAddr;
3522 }
3524 Address X86_64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
3525 QualType Ty) const {
3526 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
3527 CGF.getContext().getTypeInfoInChars(Ty),
3528 CharUnits::fromQuantity(8),
3529 /*allowHigherAlign*/ false);
3530 }
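// This can stay simple because every Win64 variadic slot is exactly 8
// bytes wide; e.g. "double d = va_arg(ap, double);" in C just reads the
// next 8-byte slot (editorial sketch; oversized values arrive as pointers).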
3532 ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
3533 bool IsReturnType) const {
3535 if (Ty->isVoidType())
3536 return ABIArgInfo::getIgnore();
3538 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3539 Ty = EnumTy->getDecl()->getIntegerType();
3541 TypeInfo Info = getContext().getTypeInfo(Ty);
3542 uint64_t Width = Info.Width;
3543 CharUnits Align = getContext().toCharUnitsFromBits(Info.Align);
3545 const RecordType *RT = Ty->getAs<RecordType>();
3547 if (!IsReturnType) {
3548 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()))
3549 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
3550 }
3551 if (RT) {
3552 if (RT->getDecl()->hasFlexibleArrayMember())
3553 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
3554 }
3557 // vectorcall adds the concept of a homogeneous vector aggregate, similar to
3558 // other targets.
3559 const Type *Base = nullptr;
3560 uint64_t NumElts = 0;
3561 if (FreeSSERegs && isHomogeneousAggregate(Ty, Base, NumElts)) {
3562 if (FreeSSERegs >= NumElts) {
3563 FreeSSERegs -= NumElts;
3564 if (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())
3565 return ABIArgInfo::getDirect();
3566 return ABIArgInfo::getExpand();
3567 }
3568 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
3569 }
3572 if (Ty->isMemberPointerType()) {
3573 // If the member pointer is represented by an LLVM int or ptr, pass it
3574 // directly.
3575 llvm::Type *LLTy = CGT.ConvertType(Ty);
3576 if (LLTy->isPointerTy() || LLTy->isIntegerTy())
3577 return ABIArgInfo::getDirect();
3578 }
3580 if (RT || Ty->isAnyComplexType() || Ty->isMemberPointerType()) {
3581 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
3582 // not 1, 2, 4, or 8 bytes, must be passed by reference."
3583 if (Width > 64 || !llvm::isPowerOf2_64(Width))
3584 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
3586 // Otherwise, coerce it to a small integer.
3587 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width));
3588 }
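// Illustrative cases for the rule above (editorial): a 3-byte
// "struct { char c[3]; }" has a 24-bit width that is not a power of two,
// so it goes by reference, while "struct { int a; char b; }" pads to 8
// bytes and is coerced to i64.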
3590 // Bool is always extended in this ABI; other builtin types are not
3591 // extended.
3592 const BuiltinType *BT = Ty->getAs<BuiltinType>();
3593 if (BT && BT->getKind() == BuiltinType::Bool)
3594 return ABIArgInfo::getExtend();
3596 // Mingw64 GCC uses the old 80-bit extended precision x87 format for long
3597 // double, and passes it indirectly through memory.
3598 if (IsMingw64 && BT && BT->getKind() == BuiltinType::LongDouble) {
3599 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
3600 if (LDF == &llvm::APFloat::x87DoubleExtended)
3601 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
3602 }
3604 return ABIArgInfo::getDirect();
3605 }
3607 void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
3608 bool IsVectorCall =
3609 FI.getCallingConvention() == llvm::CallingConv::X86_VectorCall;
3611 // We can use up to 4 SSE return registers with vectorcall.
3612 unsigned FreeSSERegs = IsVectorCall ? 4 : 0;
3613 if (!getCXXABI().classifyReturnType(FI))
3614 FI.getReturnInfo() = classify(FI.getReturnType(), FreeSSERegs, true);
3616 // We can use up to 6 SSE register parameters with vectorcall.
3617 FreeSSERegs = IsVectorCall ? 6 : 0;
3618 for (auto &I : FI.arguments())
3619 I.info = classify(I.type, FreeSSERegs, false);
3620 }
3622 Address WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
3623 QualType Ty) const {
3624 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
3625 CGF.getContext().getTypeInfoInChars(Ty),
3626 CharUnits::fromQuantity(8),
3627 /*allowHigherAlign*/ false);
3628 }
3630 // PowerPC-32
3631 namespace {
3632 /// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information.
3633 class PPC32_SVR4_ABIInfo : public DefaultABIInfo {
3634 bool IsSoftFloatABI;
3635 public:
3636 PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, bool SoftFloatABI)
3637 : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI) {}
3639 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
3640 QualType Ty) const override;
3641 };
3643 class PPC32TargetCodeGenInfo : public TargetCodeGenInfo {
3644 public:
3645 PPC32TargetCodeGenInfo(CodeGenTypes &CGT, bool SoftFloatABI)
3646 : TargetCodeGenInfo(new PPC32_SVR4_ABIInfo(CGT, SoftFloatABI)) {}
3648 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
3649 // This is recovered from gcc output.
3650 return 1; // r1 is the dedicated stack pointer
3651 }
3653 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3654 llvm::Value *Address) const override;
3655 };
3656 }
3659 // TODO: this implementation is now likely redundant with
3660 // DefaultABIInfo::EmitVAArg.
3661 Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
3662 QualType Ty) const {
3663 const unsigned OverflowLimit = 8;
3664 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
3665 // TODO: Implement this. For now ignore.
3666 (void)CTy;
3667 return Address::invalid(); // FIXME?
3668 }
3670 // struct __va_list_tag {
3671 // unsigned char gpr;
3672 // unsigned char fpr;
3673 // unsigned short reserved;
3674 // void *overflow_arg_area;
3675 // void *reg_save_area;
3676 // };
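// Rough sketch of how these fields cooperate (editorial note): gpr and
// fpr count how many of r3-r10 and f1-f8 the va_list has consumed; once a
// counter hits the 8-register limit, subsequent arguments are read from
// overflow_arg_area, which walks the caller's stack frame.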
3678 bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64;
3679 bool isInt =
3680 Ty->isIntegerType() || Ty->isPointerType() || Ty->isAggregateType();
3681 bool isF64 = Ty->isFloatingType() && getContext().getTypeSize(Ty) == 64;
3683 // All aggregates are passed indirectly? That doesn't seem consistent
3684 // with the argument-lowering code.
3685 bool isIndirect = Ty->isAggregateType();
3687 CGBuilderTy &Builder = CGF.Builder;
3689 // The calling convention either uses 1-2 GPRs or 1 FPR.
3690 Address NumRegsAddr = Address::invalid();
3691 if (isInt || IsSoftFloatABI) {
3692 NumRegsAddr = Builder.CreateStructGEP(VAList, 0, CharUnits::Zero(), "gpr");
3693 } else {
3694 NumRegsAddr = Builder.CreateStructGEP(VAList, 1, CharUnits::One(), "fpr");
3695 }
3697 llvm::Value *NumRegs = Builder.CreateLoad(NumRegsAddr, "numUsedRegs");
3699 // "Align" the register count when TY is i64.
3700 if (isI64 || (isF64 && IsSoftFloatABI)) {
3701 NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(1));
3702 NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t) ~1U));
3703 }
3705 llvm::Value *CC =
3706 Builder.CreateICmpULT(NumRegs, Builder.getInt8(OverflowLimit), "cond");
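// Example of the rounding above (editorial): with gpr == 3, a 64-bit
// integer must start in an even-numbered slot, so the count is bumped to 4
// and the value lands in the r7/r8 pair (r3 being the first argument GPR).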
3708 llvm::BasicBlock *UsingRegs = CGF.createBasicBlock("using_regs");
3709 llvm::BasicBlock *UsingOverflow = CGF.createBasicBlock("using_overflow");
3710 llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
3712 Builder.CreateCondBr(CC, UsingRegs, UsingOverflow);
3714 llvm::Type *DirectTy = CGF.ConvertType(Ty);
3715 if (isIndirect) DirectTy = DirectTy->getPointerTo(0);
3717 // Case 1: consume registers.
3718 Address RegAddr = Address::invalid();
3719 {
3720 CGF.EmitBlock(UsingRegs);
3722 Address RegSaveAreaPtr =
3723 Builder.CreateStructGEP(VAList, 4, CharUnits::fromQuantity(8));
3724 RegAddr = Address(Builder.CreateLoad(RegSaveAreaPtr),
3725 CharUnits::fromQuantity(8));
3726 assert(RegAddr.getElementType() == CGF.Int8Ty);
3728 // Floating-point registers start after the general-purpose registers.
3729 if (!(isInt || IsSoftFloatABI)) {
3730 RegAddr = Builder.CreateConstInBoundsByteGEP(RegAddr,
3731 CharUnits::fromQuantity(32));
3732 }
3734 // Get the address of the saved value by scaling the number of
3735 // registers we've used by the size of each register.
3736 CharUnits RegSize = CharUnits::fromQuantity((isInt || IsSoftFloatABI) ? 4 : 8);
3737 llvm::Value *RegOffset =
3738 Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.getQuantity()));
3739 RegAddr = Address(Builder.CreateInBoundsGEP(CGF.Int8Ty,
3740 RegAddr.getPointer(), RegOffset),
3741 RegAddr.getAlignment().alignmentOfArrayElement(RegSize));
3742 RegAddr = Builder.CreateElementBitCast(RegAddr, DirectTy);
3744 // Increase the used-register count.
3745 NumRegs =
3746 Builder.CreateAdd(NumRegs,
3747 Builder.getInt8((isI64 || (isF64 && IsSoftFloatABI)) ? 2 : 1));
3748 Builder.CreateStore(NumRegs, NumRegsAddr);
3750 CGF.EmitBranch(Cont);
3751 }
3753 // Case 2: consume space in the overflow area.
3754 Address MemAddr = Address::invalid();
3755 {
3756 CGF.EmitBlock(UsingOverflow);
3758 Builder.CreateStore(Builder.getInt8(OverflowLimit), NumRegsAddr);
3760 // Everything in the overflow area is rounded up to a size of at least 4.
3761 CharUnits OverflowAreaAlign = CharUnits::fromQuantity(4);
3763 CharUnits Size;
3764 if (!isIndirect) {
3765 auto TypeInfo = CGF.getContext().getTypeInfoInChars(Ty);
3766 Size = TypeInfo.first.alignTo(OverflowAreaAlign);
3767 } else {
3768 Size = CGF.getPointerSize();
3769 }
3771 Address OverflowAreaAddr =
3772 Builder.CreateStructGEP(VAList, 3, CharUnits::fromQuantity(4));
3773 Address OverflowArea(Builder.CreateLoad(OverflowAreaAddr, "argp.cur"),
3774 OverflowAreaAlign);
3775 // Round up address of argument to alignment
3776 CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
3777 if (Align > OverflowAreaAlign) {
3778 llvm::Value *Ptr = OverflowArea.getPointer();
3779 OverflowArea = Address(emitRoundPointerUpToAlignment(CGF, Ptr, Align),
3780 Align);
3781 }
3783 MemAddr = Builder.CreateElementBitCast(OverflowArea, DirectTy);
3785 // Increase the overflow area.
3786 OverflowArea = Builder.CreateConstInBoundsByteGEP(OverflowArea, Size);
3787 Builder.CreateStore(OverflowArea.getPointer(), OverflowAreaAddr);
3788 CGF.EmitBranch(Cont);
3789 }
3791 CGF.EmitBlock(Cont);
3793 // Merge the cases with a phi.
3794 Address Result = emitMergePHI(CGF, RegAddr, UsingRegs, MemAddr, UsingOverflow,
3795 "vaarg.addr");
3797 // Load the pointer if the argument was passed indirectly.
3798 if (isIndirect)
3799 Result = Address(Builder.CreateLoad(Result, "aggr"),
3800 getContext().getTypeAlignInChars(Ty));
3802 return Result;
3803 }
3806 bool
3807 PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3808 llvm::Value *Address) const {
3809 // This is calculated from the LLVM and GCC tables and verified
3810 // against gcc output. AFAIK all ABIs use the same encoding.
3812 CodeGen::CGBuilderTy &Builder = CGF.Builder;
3814 llvm::IntegerType *i8 = CGF.Int8Ty;
3815 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
3816 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
3817 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
3819 // 0-31: r0-31, the 4-byte general-purpose registers
3820 AssignToArrayRange(Builder, Address, Four8, 0, 31);
3822 // 32-63: fp0-31, the 8-byte floating-point registers
3823 AssignToArrayRange(Builder, Address, Eight8, 32, 63);
3825 // 64-76 are various 4-byte special-purpose registers:
3826 // 64: mq
3827 // 65: lr
3828 // 66: ctr
3829 // 67: ap
3830 // 68-75 cr0-7
3831 // 76: xer
3832 AssignToArrayRange(Builder, Address, Four8, 64, 76);
3834 // 77-108: v0-31, the 16-byte vector registers
3835 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
3837 // 109: vrsave
3838 // 110: vscr
3839 // 111: spe_acc
3840 // 112: spefscr
3841 // 113: sfp
3842 AssignToArrayRange(Builder, Address, Four8, 109, 113);
3844 return false;
3845 }
3847 // PowerPC-64
3848 namespace {
3850 /// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
3851 class PPC64_SVR4_ABIInfo : public ABIInfo {
3852 public:
3853 enum ABIKind {
3854 ELFv1 = 0,
3855 ELFv2
3856 };
3857 private:
3859 static const unsigned GPRBits = 64;
3860 ABIKind Kind;
3861 bool HasQPX;
3863 // A vector of float or double will be promoted to <4 x f32> or <4 x f64> and
3864 // will be passed in a QPX register.
3865 bool IsQPXVectorTy(const Type *Ty) const {
3866 if (!HasQPX)
3867 return false;
3869 if (const VectorType *VT = Ty->getAs<VectorType>()) {
3870 unsigned NumElements = VT->getNumElements();
3871 if (NumElements == 1)
3872 return false;
3874 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) {
3875 if (getContext().getTypeSize(Ty) <= 256)
3876 return true;
3877 } else if (VT->getElementType()->
3878 isSpecificBuiltinType(BuiltinType::Float)) {
3879 if (getContext().getTypeSize(Ty) <= 128)
3880 return true;
3881 }
3882 }
3884 return false;
3885 }
3887 bool IsQPXVectorTy(QualType Ty) const {
3888 return IsQPXVectorTy(Ty.getTypePtr());
3889 }
3891 public:
3892 PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind, bool HasQPX)
3893 : ABIInfo(CGT), Kind(Kind), HasQPX(HasQPX) {}
3895 bool isPromotableTypeForABI(QualType Ty) const;
3896 CharUnits getParamTypeAlignment(QualType Ty) const;
3898 ABIArgInfo classifyReturnType(QualType RetTy) const;
3899 ABIArgInfo classifyArgumentType(QualType Ty) const;
3901 bool isHomogeneousAggregateBaseType(QualType Ty) const override;
3902 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
3903 uint64_t Members) const override;
3905 // TODO: We can add more logic to computeInfo to improve performance.
3906 // Example: For aggregate arguments that fit in a register, we could
3907 // use getDirectInReg (as is done below for structs containing a single
3908 // floating-point value) to avoid pushing them to memory on function
3909 // entry. This would require changing the logic in PPCISelLowering
3910 // when lowering the parameters in the caller and args in the callee.
3911 void computeInfo(CGFunctionInfo &FI) const override {
3912 if (!getCXXABI().classifyReturnType(FI))
3913 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
3914 for (auto &I : FI.arguments()) {
3915 // We rely on the default argument classification for the most part.
3916 // One exception: An aggregate containing a single floating-point
3917 // or vector item must be passed in a register if one is available.
3918 const Type *T = isSingleElementStruct(I.type, getContext());
3919 if (T) {
3920 const BuiltinType *BT = T->getAs<BuiltinType>();
3921 if (IsQPXVectorTy(T) ||
3922 (T->isVectorType() && getContext().getTypeSize(T) == 128) ||
3923 (BT && BT->isFloatingPoint())) {
3924 QualType QT(T, 0);
3925 I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
3926 continue;
3927 }
3928 }
3929 I.info = classifyArgumentType(I.type);
3930 }
3931 }
3933 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
3934 QualType Ty) const override;
3935 };
3937 class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {
3939 public:
3940 PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT,
3941 PPC64_SVR4_ABIInfo::ABIKind Kind, bool HasQPX)
3942 : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT, Kind, HasQPX)) {}
3944 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
3945 // This is recovered from gcc output.
3946 return 1; // r1 is the dedicated stack pointer
3947 }
3949 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3950 llvm::Value *Address) const override;
3951 };
3953 class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
3954 public:
3955 PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
3957 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
3958 // This is recovered from gcc output.
3959 return 1; // r1 is the dedicated stack pointer
3960 }
3962 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3963 llvm::Value *Address) const override;
3964 };
3966 }
3968 // Return true if the ABI requires Ty to be passed sign- or zero-
3969 // extended to 64 bits.
3970 bool
3971 PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
3972 // Treat an enum type as its underlying type.
3973 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3974 Ty = EnumTy->getDecl()->getIntegerType();
3976 // Promotable integer types are required to be promoted by the ABI.
3977 if (Ty->isPromotableIntegerType())
3978 return true;
3980 // In addition to the usual promotable integer types, we also need to
3981 // extend all 32-bit types, since the ABI requires promotion to 64 bits.
3982 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
3983 switch (BT->getKind()) {
3984 case BuiltinType::Int:
3985 case BuiltinType::UInt:
3986 return true;
3987 default:
3988 break;
3989 }
3991 return false;
3992 }
3994 /// getParamTypeAlignment - Determine the alignment this type requires in
3995 /// the parameter area; always returns at least 8 bytes.
3996 CharUnits PPC64_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
3997 // Complex types are passed just like their elements.
3998 if (const ComplexType *CTy = Ty->getAs<ComplexType>())
3999 Ty = CTy->getElementType();
4001 // Only vector types of size 16 bytes need alignment (larger types are
4002 // passed via reference, smaller types are not aligned).
4003 if (IsQPXVectorTy(Ty)) {
4004 if (getContext().getTypeSize(Ty) > 128)
4005 return CharUnits::fromQuantity(32);
4007 return CharUnits::fromQuantity(16);
4008 } else if (Ty->isVectorType()) {
4009 return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16 : 8);
4010 }
4012 // For single-element float/vector structs, we consider the whole type
4013 // to have the same alignment requirements as its single element.
4014 const Type *AlignAsType = nullptr;
4015 const Type *EltType = isSingleElementStruct(Ty, getContext());
4016 if (EltType) {
4017 const BuiltinType *BT = EltType->getAs<BuiltinType>();
4018 if (IsQPXVectorTy(EltType) || (EltType->isVectorType() &&
4019 getContext().getTypeSize(EltType) == 128) ||
4020 (BT && BT->isFloatingPoint()))
4021 AlignAsType = EltType;
4022 }
4024 // Likewise for ELFv2 homogeneous aggregates.
4025 const Type *Base = nullptr;
4026 uint64_t Members = 0;
4027 if (!AlignAsType && Kind == ELFv2 &&
4028 isAggregateTypeForABI(Ty) && isHomogeneousAggregate(Ty, Base, Members))
4029 AlignAsType = Base;
4031 // With special case aggregates, only vector base types need alignment.
4032 if (AlignAsType && IsQPXVectorTy(AlignAsType)) {
4033 if (getContext().getTypeSize(AlignAsType) > 128)
4034 return CharUnits::fromQuantity(32);
4036 return CharUnits::fromQuantity(16);
4037 } else if (AlignAsType) {
4038 return CharUnits::fromQuantity(AlignAsType->isVectorType() ? 16 : 8);
4039 }
4041 // Otherwise, we only need alignment for any aggregate type that
4042 // has an alignment requirement of >= 16 bytes.
4043 if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128) {
4044 if (HasQPX && getContext().getTypeAlign(Ty) >= 256)
4045 return CharUnits::fromQuantity(32);
4046 return CharUnits::fromQuantity(16);
4047 }
4049 return CharUnits::fromQuantity(8);
4050 }
4052 /// isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous
4053 /// aggregate. Base is set to the base element type, and Members is set
4054 /// to the number of base elements.
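// For example (editorial illustration): "struct { float x, y, z; }" is
// homogeneous with Base = float and Members = 3, while
// "struct { double d; int i; }" is not, since int can never match the
// floating-point/vector base element.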
4055 bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base,
4056 uint64_t &Members) const {
4057 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
4058 uint64_t NElements = AT->getSize().getZExtValue();
4059 if (NElements == 0)
4060 return false;
4061 if (!isHomogeneousAggregate(AT->getElementType(), Base, Members))
4062 return false;
4063 Members *= NElements;
4064 } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
4065 const RecordDecl *RD = RT->getDecl();
4066 if (RD->hasFlexibleArrayMember())
4067 return false;
4069 Members = 0;
4071 // If this is a C++ record, check the bases first.
4072 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
4073 for (const auto &I : CXXRD->bases()) {
4074 // Ignore empty records.
4075 if (isEmptyRecord(getContext(), I.getType(), true))
4076 continue;
4078 uint64_t FldMembers;
4079 if (!isHomogeneousAggregate(I.getType(), Base, FldMembers))
4080 return false;
4082 Members += FldMembers;
4083 }
4084 }
4086 for (const auto *FD : RD->fields()) {
4087 // Ignore (non-zero arrays of) empty records.
4088 QualType FT = FD->getType();
4089 while (const ConstantArrayType *AT =
4090 getContext().getAsConstantArrayType(FT)) {
4091 if (AT->getSize().getZExtValue() == 0)
4092 return false;
4093 FT = AT->getElementType();
4094 }
4095 if (isEmptyRecord(getContext(), FT, true))
4096 continue;
4098 // For compatibility with GCC, ignore empty bitfields in C++ mode.
4099 if (getContext().getLangOpts().CPlusPlus &&
4100 FD->isBitField() && FD->getBitWidthValue(getContext()) == 0)
4101 continue;
4103 uint64_t FldMembers;
4104 if (!isHomogeneousAggregate(FD->getType(), Base, FldMembers))
4105 return false;
4107 Members = (RD->isUnion() ?
4108 std::max(Members, FldMembers) : Members + FldMembers);
4109 }
4111 if (!Base)
4112 return false;
4114 // Ensure there is no padding.
4115 if (getContext().getTypeSize(Base) * Members !=
4116 getContext().getTypeSize(Ty))
4117 return false;
4118 } else {
4119 Members = 1;
4120 if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
4121 Members = 2;
4122 Ty = CT->getElementType();
4123 }
4125 // Most ABIs only support float, double, and some vector type widths.
4126 if (!isHomogeneousAggregateBaseType(Ty))
4127 return false;
4129 // The base type must be the same for all members. Types that
4130 // agree in both total size and mode (float vs. vector) are
4131 // treated as being equivalent here.
4132 const Type *TyPtr = Ty.getTypePtr();
4133 if (!Base) {
4134 Base = TyPtr;
4135 // If it's a non-power-of-2 vector, its size is already a power-of-2,
4136 // so make sure to widen it explicitly.
4137 if (const VectorType *VT = Base->getAs<VectorType>()) {
4138 QualType EltTy = VT->getElementType();
4139 unsigned NumElements =
4140 getContext().getTypeSize(VT) / getContext().getTypeSize(EltTy);
4141 Base = getContext()
4142 .getVectorType(EltTy, NumElements, VT->getVectorKind())
4143 .getTypePtr();
4144 }
4145 }
4147 if (Base->isVectorType() != TyPtr->isVectorType() ||
4148 getContext().getTypeSize(Base) != getContext().getTypeSize(TyPtr))
4149 return false;
4150 }
4151 return Members > 0 && isHomogeneousAggregateSmallEnough(Base, Members);
4152 }
4154 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
4155 // Homogeneous aggregates for ELFv2 must have base types of float,
4156 // double, long double, or 128-bit vectors.
4157 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
4158 if (BT->getKind() == BuiltinType::Float ||
4159 BT->getKind() == BuiltinType::Double ||
4160 BT->getKind() == BuiltinType::LongDouble)
4161 return true;
4162 }
4163 if (const VectorType *VT = Ty->getAs<VectorType>()) {
4164 if (getContext().getTypeSize(VT) == 128 || IsQPXVectorTy(Ty))
4165 return true;
4166 }
4167 return false;
4168 }
4170 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough(
4171 const Type *Base, uint64_t Members) const {
4172 // Vector types require one register, floating point types require one
4173 // or two registers depending on their size.
4174 uint32_t NumRegs =
4175 Base->isVectorType() ? 1 : (getContext().getTypeSize(Base) + 63) / 64;
4177 // Homogeneous Aggregates may occupy at most 8 registers.
4178 return Members * NumRegs <= 8;
4179 }
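// Consequences of the 8-register budget (editorial): up to eight float or
// double members qualify (one register each), but 128-bit "long double"
// members cost two registers apiece, capping such aggregates at four
// members.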
4181 ABIArgInfo
4182 PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
4183 Ty = useFirstFieldIfTransparentUnion(Ty);
4185 if (Ty->isAnyComplexType())
4186 return ABIArgInfo::getDirect();
4188 // Non-Altivec vector types are passed in GPRs (smaller than 16 bytes)
4189 // or via reference (larger than 16 bytes).
4190 if (Ty->isVectorType() && !IsQPXVectorTy(Ty)) {
4191 uint64_t Size = getContext().getTypeSize(Ty);
4192 if (Size > 128)
4193 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
4194 else if (Size < 128) {
4195 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
4196 return ABIArgInfo::getDirect(CoerceTy);
4197 }
4198 }
4200 if (isAggregateTypeForABI(Ty)) {
4201 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
4202 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
4204 uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity();
4205 uint64_t TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity();
4207 // ELFv2 homogeneous aggregates are passed as array types.
4208 const Type *Base = nullptr;
4209 uint64_t Members = 0;
4210 if (Kind == ELFv2 &&
4211 isHomogeneousAggregate(Ty, Base, Members)) {
4212 llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
4213 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
4214 return ABIArgInfo::getDirect(CoerceTy);
4215 }
4217 // If an aggregate may end up fully in registers, we do not
4218 // use the ByVal method, but pass the aggregate as array.
4219 // This is usually beneficial since we avoid forcing the
4220 // back-end to store the argument to memory.
4221 uint64_t Bits = getContext().getTypeSize(Ty);
4222 if (Bits > 0 && Bits <= 8 * GPRBits) {
4223 llvm::Type *CoerceTy;
4225 // Types up to 8 bytes are passed as integer type (which will be
4226 // properly aligned in the argument save area doubleword).
4227 if (Bits <= GPRBits)
4228 CoerceTy =
4229 llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
4230 // Larger types are passed as arrays, with the base type selected
4231 // according to the required alignment in the save area.
4232 else {
4233 uint64_t RegBits = ABIAlign * 8;
4234 uint64_t NumRegs = llvm::alignTo(Bits, RegBits) / RegBits;
4235 llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits);
4236 CoerceTy = llvm::ArrayType::get(RegTy, NumRegs);
4237 }
4239 return ABIArgInfo::getDirect(CoerceTy);
4240 }
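// Worked example of the array coercion above (editorial): a 24-byte struct
// with 8-byte ABI alignment has Bits = 192 and RegBits = 64 and is passed
// as [3 x i64]; if a member forced 16-byte alignment it would instead be
// passed as [2 x i128].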
4242 // All other aggregates are passed ByVal.
4243 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
4244 /*ByVal=*/true,
4245 /*Realign=*/TyAlign > ABIAlign);
4246 }
4248 return (isPromotableTypeForABI(Ty) ?
4249 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
4250 }
4252 ABIArgInfo
4253 PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
4254 if (RetTy->isVoidType())
4255 return ABIArgInfo::getIgnore();
4257 if (RetTy->isAnyComplexType())
4258 return ABIArgInfo::getDirect();
4260 // Non-Altivec vector types are returned in GPRs (smaller than 16 bytes)
4261 // or via reference (larger than 16 bytes).
4262 if (RetTy->isVectorType() && !IsQPXVectorTy(RetTy)) {
4263 uint64_t Size = getContext().getTypeSize(RetTy);
4264 if (Size > 128)
4265 return getNaturalAlignIndirect(RetTy);
4266 else if (Size < 128) {
4267 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
4268 return ABIArgInfo::getDirect(CoerceTy);
4269 }
4270 }
4272 if (isAggregateTypeForABI(RetTy)) {
4273 // ELFv2 homogeneous aggregates are returned as array types.
4274 const Type *Base = nullptr;
4275 uint64_t Members = 0;
4276 if (Kind == ELFv2 &&
4277 isHomogeneousAggregate(RetTy, Base, Members)) {
4278 llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
4279 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
4280 return ABIArgInfo::getDirect(CoerceTy);
4281 }
4283 // ELFv2 small aggregates are returned in up to two registers.
4284 uint64_t Bits = getContext().getTypeSize(RetTy);
4285 if (Kind == ELFv2 && Bits <= 2 * GPRBits) {
4286 if (Bits == 0)
4287 return ABIArgInfo::getIgnore();
4289 llvm::Type *CoerceTy;
4290 if (Bits > GPRBits) {
4291 CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits);
4292 CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy, nullptr);
4293 } else
4294 CoerceTy =
4295 llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
4296 return ABIArgInfo::getDirect(CoerceTy);
4297 }
4299 // All other aggregates are returned indirectly.
4300 return getNaturalAlignIndirect(RetTy);
4301 }
4303 return (isPromotableTypeForABI(RetTy) ?
4304 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
4305 }
4307 // Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine.
4308 Address PPC64_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4309 QualType Ty) const {
4310 auto TypeInfo = getContext().getTypeInfoInChars(Ty);
4311 TypeInfo.second = getParamTypeAlignment(Ty);
4313 CharUnits SlotSize = CharUnits::fromQuantity(8);
4315 // If we have a complex type and the base type is smaller than 8 bytes,
4316 // the ABI calls for the real and imaginary parts to be right-adjusted
4317 // in separate doublewords. However, Clang expects us to produce a
4318 // pointer to a structure with the two parts packed tightly. So generate
4319 // loads of the real and imaginary parts relative to the va_list pointer,
4320 // and store them to a temporary structure.
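// Editorial sketch for big-endian "float _Complex": each 4-byte part is
// right-adjusted in its own doubleword, so the real part is loaded from
// ap + 4 and the imaginary part from ap + 12, then stored to a temporary
// packed as { float, float } at offsets 0 and 4.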
4321 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
4322 CharUnits EltSize = TypeInfo.first / 2;
4323 if (EltSize < SlotSize) {
4324 Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, CGF.Int8Ty,
4325 SlotSize * 2, SlotSize,
4326 SlotSize, /*AllowHigher*/ true);
4328 Address RealAddr = Addr;
4329 Address ImagAddr = RealAddr;
4330 if (CGF.CGM.getDataLayout().isBigEndian()) {
4331 RealAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr,
4332 SlotSize - EltSize);
4333 ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(ImagAddr,
4334 2 * SlotSize - EltSize);
4335 } else {
4336 ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, SlotSize);
4337 }
4339 llvm::Type *EltTy = CGF.ConvertTypeForMem(CTy->getElementType());
4340 RealAddr = CGF.Builder.CreateElementBitCast(RealAddr, EltTy);
4341 ImagAddr = CGF.Builder.CreateElementBitCast(ImagAddr, EltTy);
4342 llvm::Value *Real = CGF.Builder.CreateLoad(RealAddr, ".vareal");
4343 llvm::Value *Imag = CGF.Builder.CreateLoad(ImagAddr, ".vaimag");
4345 Address Temp = CGF.CreateMemTemp(Ty, "vacplx");
4346 CGF.EmitStoreOfComplex({Real, Imag}, CGF.MakeAddrLValue(Temp, Ty),
4347 /*init*/ true);
4348 return Temp;
4349 }
4350 }
4352 // Otherwise, just use the general rule.
4353 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
4354 TypeInfo, SlotSize, /*AllowHigher*/ true);
4355 }
4357 static bool
4358 PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4359 llvm::Value *Address) {
4360 // This is calculated from the LLVM and GCC tables and verified
4361 // against gcc output. AFAIK all ABIs use the same encoding.
4363 CodeGen::CGBuilderTy &Builder = CGF.Builder;
4365 llvm::IntegerType *i8 = CGF.Int8Ty;
4366 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
4367 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
4368 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
4370 // 0-31: r0-31, the 8-byte general-purpose registers
4371 AssignToArrayRange(Builder, Address, Eight8, 0, 31);
4373 // 32-63: fp0-31, the 8-byte floating-point registers
4374 AssignToArrayRange(Builder, Address, Eight8, 32, 63);
4376 // 64-76 are various 4-byte special-purpose registers:
4377 // 64: mq
4378 // 65: lr
4379 // 66: ctr
4380 // 67: ap
4381 // 68-75 cr0-7
4382 // 76: xer
4383 AssignToArrayRange(Builder, Address, Four8, 64, 76);
4385 // 77-108: v0-31, the 16-byte vector registers
4386 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
4388 // 109: vrsave
4389 // 110: vscr
4390 // 111: spe_acc
4391 // 112: spefscr
4392 // 113: sfp
4393 AssignToArrayRange(Builder, Address, Four8, 109, 113);
4395 return false;
4396 }
4398 bool
4399 PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
4400 CodeGen::CodeGenFunction &CGF,
4401 llvm::Value *Address) const {
4403 return PPC64_initDwarfEHRegSizeTable(CGF, Address);
4404 }
4406 bool
4407 PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4408 llvm::Value *Address) const {
4410 return PPC64_initDwarfEHRegSizeTable(CGF, Address);
4411 }
4413 //===----------------------------------------------------------------------===//
4414 // AArch64 ABI Implementation
4415 //===----------------------------------------------------------------------===//
4417 namespace {
4419 class AArch64ABIInfo : public SwiftABIInfo {
4420 public:
4421 enum ABIKind {
4422 AAPCS = 0,
4423 DarwinPCS
4424 };
4426 private:
4427 ABIKind Kind;
4429 public:
4430 AArch64ABIInfo(CodeGenTypes &CGT, ABIKind Kind)
4431 : SwiftABIInfo(CGT), Kind(Kind) {}
4434 ABIKind getABIKind() const { return Kind; }
4435 bool isDarwinPCS() const { return Kind == DarwinPCS; }
4437 ABIArgInfo classifyReturnType(QualType RetTy) const;
4438 ABIArgInfo classifyArgumentType(QualType RetTy) const;
4439 bool isHomogeneousAggregateBaseType(QualType Ty) const override;
4440 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
4441 uint64_t Members) const override;
4443 bool isIllegalVectorType(QualType Ty) const;
4445 void computeInfo(CGFunctionInfo &FI) const override {
4446 if (!getCXXABI().classifyReturnType(FI))
4447 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
4449 for (auto &it : FI.arguments())
4450 it.info = classifyArgumentType(it.type);
4451 }
4453 Address EmitDarwinVAArg(Address VAListAddr, QualType Ty,
4454 CodeGenFunction &CGF) const;
4456 Address EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
4457 CodeGenFunction &CGF) const;
4459 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4460 QualType Ty) const override {
4461 return isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
4462 : EmitAAPCSVAArg(VAListAddr, Ty, CGF);
4463 }
4465 bool shouldPassIndirectlyForSwift(CharUnits totalSize,
4466 ArrayRef<llvm::Type*> scalars,
4467 bool asReturnValue) const override {
4468 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
4469 }
4470 };
4472 class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
4473 public:
4474 AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind Kind)
4475 : TargetCodeGenInfo(new AArch64ABIInfo(CGT, Kind)) {}
4477 StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
4478 return "mov\tfp, fp\t\t; marker for objc_retainAutoreleaseReturnValue";
4481 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
4482 return 31;
4483 }
4485 bool doesReturnSlotInterfereWithArgs() const override { return false; }
4486 };
4487 }
4489 ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const {
4490 Ty = useFirstFieldIfTransparentUnion(Ty);
4492 // Handle illegal vector types here.
4493 if (isIllegalVectorType(Ty)) {
4494 uint64_t Size = getContext().getTypeSize(Ty);
4495 // Android promotes <2 x i8> to i16, not i32
4496 if (isAndroid() && (Size <= 16)) {
4497 llvm::Type *ResType = llvm::Type::getInt16Ty(getVMContext());
4498 return ABIArgInfo::getDirect(ResType);
4499 }
4500 if (Size <= 32) {
4501 llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
4502 return ABIArgInfo::getDirect(ResType);
4503 }
4504 if (Size == 64) {
4505 llvm::Type *ResType =
4506 llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
4507 return ABIArgInfo::getDirect(ResType);
4508 }
4509 if (Size == 128) {
4510 llvm::Type *ResType =
4511 llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
4512 return ABIArgInfo::getDirect(ResType);
4513 }
4514 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
4515 }
4517 if (!isAggregateTypeForABI(Ty)) {
4518 // Treat an enum type as its underlying type.
4519 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
4520 Ty = EnumTy->getDecl()->getIntegerType();
4522 return (Ty->isPromotableIntegerType() && isDarwinPCS()
4523 ? ABIArgInfo::getExtend()
4524 : ABIArgInfo::getDirect());
4525 }
4527 // Structures with either a non-trivial destructor or a non-trivial
4528 // copy constructor are always indirect.
4529 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
4530 return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
4531 CGCXXABI::RAA_DirectInMemory);
4532 }
4534 // Empty records are always ignored on Darwin, but actually passed in C++ mode
4535 // elsewhere for GNU compatibility.
4536 if (isEmptyRecord(getContext(), Ty, true)) {
4537 if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
4538 return ABIArgInfo::getIgnore();
4540 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
4541 }
4543 // Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
4544 const Type *Base = nullptr;
4545 uint64_t Members = 0;
4546 if (isHomogeneousAggregate(Ty, Base, Members)) {
4547 return ABIArgInfo::getDirect(
4548 llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members));
4549 }
4551 // Aggregates <= 16 bytes are passed directly in registers or on the stack.
4552 uint64_t Size = getContext().getTypeSize(Ty);
4553 if (Size <= 128) {
4554 unsigned Alignment = getContext().getTypeAlign(Ty);
4555 Size = 64 * ((Size + 63) / 64); // round up to multiple of 8 bytes
4557 // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
4558 // For aggregates with 16-byte alignment, we use i128.
4559 if (Alignment < 128 && Size == 128) {
4560 llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
4561 return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
4562 }
4563 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
4564 }
4566 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
4567 }
4569 ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy) const {
4570 if (RetTy->isVoidType())
4571 return ABIArgInfo::getIgnore();
4573 // Large vector types should be returned via memory.
4574 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
4575 return getNaturalAlignIndirect(RetTy);
4577 if (!isAggregateTypeForABI(RetTy)) {
4578 // Treat an enum type as its underlying type.
4579 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
4580 RetTy = EnumTy->getDecl()->getIntegerType();
4582 return (RetTy->isPromotableIntegerType() && isDarwinPCS()
4583 ? ABIArgInfo::getExtend()
4584 : ABIArgInfo::getDirect());
4585 }
4587 if (isEmptyRecord(getContext(), RetTy, true))
4588 return ABIArgInfo::getIgnore();
4590 const Type *Base = nullptr;
4591 uint64_t Members = 0;
4592 if (isHomogeneousAggregate(RetTy, Base, Members))
4593 // Homogeneous Floating-point Aggregates (HFAs) are returned directly.
4594 return ABIArgInfo::getDirect();
4596 // Aggregates <= 16 bytes are returned directly in registers or on the stack.
4597 uint64_t Size = getContext().getTypeSize(RetTy);
4598 if (Size <= 128) {
4599 unsigned Alignment = getContext().getTypeAlign(RetTy);
4600 Size = 64 * ((Size + 63) / 64); // round up to multiple of 8 bytes
4602 // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
4603 // For aggregates with 16-byte alignment, we use i128.
4604 if (Alignment < 128 && Size == 128) {
4605 llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
4606 return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
4607 }
4608 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
4609 }
4611 return getNaturalAlignIndirect(RetTy);
4612 }
4614 /// isIllegalVectorType - check whether the vector type is legal for AArch64.
4615 bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const {
4616 if (const VectorType *VT = Ty->getAs<VectorType>()) {
4617 // Check whether VT is legal.
4618 unsigned NumElements = VT->getNumElements();
4619 uint64_t Size = getContext().getTypeSize(VT);
4620 // NumElements should be a power of 2.
4621 if (!llvm::isPowerOf2_32(NumElements))
4622 return true;
4623 return Size != 64 && (Size != 128 || NumElements == 1);
4624 }
4625 return false;
4626 }
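// Examples (editorial): <2 x float> (64 bits) and <4 x float> (128 bits)
// are legal; <3 x float> is illegal because three elements is not a power
// of two, and a 128-bit single-element vector is illegal as well.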
4628 bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
4629 // Homogeneous aggregates for AAPCS64 must have base types of a floating
4630 // point type or a short-vector type. This is the same as the 32-bit ABI,
4631 // but with the difference that any floating-point type is allowed,
4632 // including __fp16.
4633 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
4634 if (BT->isFloatingPoint())
4635 return true;
4636 } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
4637 unsigned VecSize = getContext().getTypeSize(VT);
4638 if (VecSize == 64 || VecSize == 128)
4639 return true;
4640 }
4641 return false;
4642 }
4644 bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
4645 uint64_t Members) const {
4646 return Members <= 4;
4647 }
4649 Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr,
4650 QualType Ty,
4651 CodeGenFunction &CGF) const {
4652 ABIArgInfo AI = classifyArgumentType(Ty);
4653 bool IsIndirect = AI.isIndirect();
4655 llvm::Type *BaseTy = CGF.ConvertType(Ty);
4656 if (IsIndirect)
4657 BaseTy = llvm::PointerType::getUnqual(BaseTy);
4658 else if (AI.getCoerceToType())
4659 BaseTy = AI.getCoerceToType();
4661 unsigned NumRegs = 1;
4662 if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
4663 BaseTy = ArrTy->getElementType();
4664 NumRegs = ArrTy->getNumElements();
4665 }
4666 bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy();
4668 // The AArch64 va_list type and handling is specified in the Procedure Call
4669 // Standard, section B.4:
4670 //
4671 // struct {
4672 // void *__stack;
4673 // void *__gr_top;
4674 // void *__vr_top;
4675 // int __gr_offs;
4676 // int __vr_offs;
4677 // };
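// Runtime behaviour of the offsets (editorial sketch): va_start sets
// __gr_offs to -(8 * unused GPRs) and __vr_offs to -(16 * unused FP/SIMD
// regs); each va_arg adds the argument's register size, and a non-negative
// offset means the save area is exhausted and __stack takes over. E.g. if
// the named arguments used x0-x1, __gr_offs starts at -48.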
4679 llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
4680 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
4681 llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
4682 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
4684 auto TyInfo = getContext().getTypeInfoInChars(Ty);
4685 CharUnits TyAlign = TyInfo.second;
4687 Address reg_offs_p = Address::invalid();
4688 llvm::Value *reg_offs = nullptr;
4689 int reg_top_index;
4690 CharUnits reg_top_offset;
4691 int RegSize = IsIndirect ? 8 : TyInfo.first.getQuantity();
4692 if (!IsFPR) {
4693 // 3 is the field number of __gr_offs
4694 reg_offs_p =
4695 CGF.Builder.CreateStructGEP(VAListAddr, 3, CharUnits::fromQuantity(24),
4696 "gr_offs_p");
4697 reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
4698 reg_top_index = 1; // field number for __gr_top
4699 reg_top_offset = CharUnits::fromQuantity(8);
4700 RegSize = llvm::alignTo(RegSize, 8);
4701 } else {
4702 // 4 is the field number of __vr_offs.
4703 reg_offs_p =
4704 CGF.Builder.CreateStructGEP(VAListAddr, 4, CharUnits::fromQuantity(28),
4705 "vr_offs_p");
4706 reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
4707 reg_top_index = 2; // field number for __vr_top
4708 reg_top_offset = CharUnits::fromQuantity(16);
4709 RegSize = 16 * NumRegs;
4710 }
4712 //=======================================
4713 // Find out where argument was passed
4714 //=======================================
4716 // If reg_offs >= 0 we're already using the stack for this type of
4717 // argument. We don't want to keep updating reg_offs (in case it overflows,
4718 // though anyone passing 2GB of arguments, each at most 16 bytes, deserves
4719 // whatever they get).
4720 llvm::Value *UsingStack = nullptr;
4721 UsingStack = CGF.Builder.CreateICmpSGE(
4722 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));
4724 CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);
4726 // Otherwise, at least some kind of argument could go in these registers, the
4727 // question is whether this particular type is too big.
4728 CGF.EmitBlock(MaybeRegBlock);
4730 // Integer arguments may need register alignment correction (for example a
4731 // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we
4732 // align __gr_offs to calculate the potential address.
4733 if (!IsFPR && !IsIndirect && TyAlign.getQuantity() > 8) {
4734 int Align = TyAlign.getQuantity();
4736 reg_offs = CGF.Builder.CreateAdd(
4737 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
4738 "align_regoffs");
4739 reg_offs = CGF.Builder.CreateAnd(
4740 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align),
4741 "aligned_regoffs");
4742 }
4744 // Update the gr_offs/vr_offs pointer for next call to va_arg on this va_list.
4745 // The fact that this is done unconditionally reflects the fact that
4746 // allocating an argument to the stack also uses up all the remaining
4747 // registers of the appropriate kind.
4748 llvm::Value *NewOffset = nullptr;
4749 NewOffset = CGF.Builder.CreateAdd(
4750 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
4751 CGF.Builder.CreateStore(NewOffset, reg_offs_p);
4753 // Now we're in a position to decide whether this argument really was in
4754 // registers or not.
4755 llvm::Value *InRegs = nullptr;
4756 InRegs = CGF.Builder.CreateICmpSLE(
4757 NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");
4759 CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);
4761 //=======================================
4762 // Argument was in registers
4763 //=======================================
4765 // Now we emit the code for if the argument was originally passed in
4766 // registers. First start the appropriate block:
4767 CGF.EmitBlock(InRegBlock);
4769 llvm::Value *reg_top = nullptr;
4770 Address reg_top_p = CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index,
4771 reg_top_offset, "reg_top_p");
4772 reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
4773 Address BaseAddr(CGF.Builder.CreateInBoundsGEP(reg_top, reg_offs),
4774 CharUnits::fromQuantity(IsFPR ? 16 : 8));
4775 Address RegAddr = Address::invalid();
4776 llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty);
4778 if (IsIndirect) {
4779 // If it's been passed indirectly (actually a struct), whatever we find from
4780 // stored registers or on the stack will actually be a struct **.
4781 MemTy = llvm::PointerType::getUnqual(MemTy);
4782 }
4784 const Type *Base = nullptr;
4785 uint64_t NumMembers = 0;
4786 bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers);
4787 if (IsHFA && NumMembers > 1) {
4788 // Homogeneous aggregates passed in registers will have their elements split
4789 // and stored 16-bytes apart regardless of size (they're notionally in qN,
4790 // qN+1, ...). We reload and store into a temporary local variable
4791 // contiguously.
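// Sketch for an HFA of three floats (editorial): the members arrive in
// q-register slots at BaseAddr + 0, + 16 and + 32, and the loop below
// packs them into a contiguous 12-byte temporary at offsets 0, 4 and 8.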
4792 assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
4793 auto BaseTyInfo = getContext().getTypeInfoInChars(QualType(Base, 0));
4794 llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
4795 llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
4796 Address Tmp = CGF.CreateTempAlloca(HFATy,
4797 std::max(TyAlign, BaseTyInfo.second));
4799 // On big-endian platforms, the value will be right-aligned in its slot.
4800 int Offset = 0;
4801 if (CGF.CGM.getDataLayout().isBigEndian() &&
4802 BaseTyInfo.first.getQuantity() < 16)
4803 Offset = 16 - BaseTyInfo.first.getQuantity();
4805 for (unsigned i = 0; i < NumMembers; ++i) {
4806 CharUnits BaseOffset = CharUnits::fromQuantity(16 * i + Offset);
4807 Address LoadAddr =
4808 CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, BaseOffset);
4809 LoadAddr = CGF.Builder.CreateElementBitCast(LoadAddr, BaseTy);
4811 Address StoreAddr =
4812 CGF.Builder.CreateConstArrayGEP(Tmp, i, BaseTyInfo.first);
4814 llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
4815 CGF.Builder.CreateStore(Elem, StoreAddr);
4816 }
4818 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, MemTy);
4819 } else {
4820 // Otherwise the object is contiguous in memory.
4822 // It might be right-aligned in its slot.
4823 CharUnits SlotSize = BaseAddr.getAlignment();
4824 if (CGF.CGM.getDataLayout().isBigEndian() && !IsIndirect &&
4825 (IsHFA || !isAggregateTypeForABI(Ty)) &&
4826 TyInfo.first < SlotSize) {
4827 CharUnits Offset = SlotSize - TyInfo.first;
4828 BaseAddr = CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, Offset);
4829 }
4831 RegAddr = CGF.Builder.CreateElementBitCast(BaseAddr, MemTy);
4832 }
4834 CGF.EmitBranch(ContBlock);
4836 //=======================================
4837 // Argument was on the stack
4838 //=======================================
4839 CGF.EmitBlock(OnStackBlock);
4841 Address stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0,
4842 CharUnits::Zero(), "stack_p");
4843 llvm::Value *OnStackPtr = CGF.Builder.CreateLoad(stack_p, "stack");
4845 // Again, stack arguments may need realignment. In this case both integer and
4846 // floating-point ones might be affected.
4847 if (!IsIndirect && TyAlign.getQuantity() > 8) {
4848 int Align = TyAlign.getQuantity();
4850 OnStackPtr = CGF.Builder.CreatePtrToInt(OnStackPtr, CGF.Int64Ty);
4852 OnStackPtr = CGF.Builder.CreateAdd(
4853 OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
4854 "align_stack");
4855 OnStackPtr = CGF.Builder.CreateAnd(
4856 OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),
4857 "align_stack");
4859 OnStackPtr = CGF.Builder.CreateIntToPtr(OnStackPtr, CGF.Int8PtrTy);
4860 }
4861 Address OnStackAddr(OnStackPtr,
4862 std::max(CharUnits::fromQuantity(8), TyAlign));
4864 // All stack slots are multiples of 8 bytes.
4865 CharUnits StackSlotSize = CharUnits::fromQuantity(8);
4866 CharUnits StackSize;
4867 if (IsIndirect)
4868 StackSize = StackSlotSize;
4869 else
4870 StackSize = TyInfo.first.alignTo(StackSlotSize);
4872 llvm::Value *StackSizeC = CGF.Builder.getSize(StackSize);
4873 llvm::Value *NewStack =
4874 CGF.Builder.CreateInBoundsGEP(OnStackPtr, StackSizeC, "new_stack");
4876 // Write the new value of __stack for the next call to va_arg
4877 CGF.Builder.CreateStore(NewStack, stack_p);
4879 if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
4880 TyInfo.first < StackSlotSize) {
4881 CharUnits Offset = StackSlotSize - TyInfo.first;
4882 OnStackAddr = CGF.Builder.CreateConstInBoundsByteGEP(OnStackAddr, Offset);
4883 }
4885 OnStackAddr = CGF.Builder.CreateElementBitCast(OnStackAddr, MemTy);
4887 CGF.EmitBranch(ContBlock);
4889 //=======================================
4890 // Tidy up
4891 //=======================================
4892 CGF.EmitBlock(ContBlock);
4894 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock,
4895 OnStackAddr, OnStackBlock, "vaargs.addr");
4897 if (IsIndirect)
4898 return Address(CGF.Builder.CreateLoad(ResAddr, "vaarg.addr"),
4899 TyInfo.second);
4901 return ResAddr;
4902 }
4904 Address AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty,
4905 CodeGenFunction &CGF) const {
4906 // The backend's lowering doesn't support va_arg for aggregates or
4907 // illegal vector types. Lower VAArg here for these cases and use
4908 // the LLVM va_arg instruction for everything else.
4909 if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty))
4910 return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());
4912 CharUnits SlotSize = CharUnits::fromQuantity(8);
4914 // Empty records are ignored for parameter passing purposes.
4915 if (isEmptyRecord(getContext(), Ty, true)) {
4916 Address Addr(CGF.Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize);
4917 Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
4918 return Addr;
4919 }
4921 // The size of the actual thing passed, which might end up just
4922 // being a pointer for indirect types.
4923 auto TyInfo = getContext().getTypeInfoInChars(Ty);
4925 // Arguments bigger than 16 bytes which aren't homogeneous
4926 // aggregates should be passed indirectly.
4927 bool IsIndirect = false;
4928 if (TyInfo.first.getQuantity() > 16) {
4929 const Type *Base = nullptr;
4930 uint64_t Members = 0;
4931 IsIndirect = !isHomogeneousAggregate(Ty, Base, Members);
4932 }
4934 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
4935 TyInfo, SlotSize, /*AllowHigherAlign*/ true);
4936 }
4938 //===----------------------------------------------------------------------===//
4939 // ARM ABI Implementation
4940 //===----------------------------------------------------------------------===//
4942 namespace {
4944 class ARMABIInfo : public SwiftABIInfo {
4945 public:
4946 enum ABIKind {
4947 APCS = 0,
4948 AAPCS = 1,
4949 AAPCS_VFP = 2,
4950 AAPCS16_VFP = 3,
4951 };
4953 private:
4954 ABIKind Kind;
4956 public:
4957 ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind)
4958 : SwiftABIInfo(CGT), Kind(_Kind) {
4959 setCCs();
4960 }
4962 bool isEABI() const {
4963 switch (getTarget().getTriple().getEnvironment()) {
4964 case llvm::Triple::Android:
4965 case llvm::Triple::EABI:
4966 case llvm::Triple::EABIHF:
4967 case llvm::Triple::GNUEABI:
4968 case llvm::Triple::GNUEABIHF:
4969 case llvm::Triple::MuslEABI:
4970 case llvm::Triple::MuslEABIHF:
4971 return true;
4972 default:
4973 return false;
4974 }
4975 }
4977 bool isEABIHF() const {
4978 switch (getTarget().getTriple().getEnvironment()) {
4979 case llvm::Triple::EABIHF:
4980 case llvm::Triple::GNUEABIHF:
4981 case llvm::Triple::MuslEABIHF:
4982 return true;
4983 default:
4984 return false;
4985 }
4986 }
4988 ABIKind getABIKind() const { return Kind; }
4990 private:
4991 ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic) const;
4992 ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic) const;
4993 bool isIllegalVectorType(QualType Ty) const;
4995 bool isHomogeneousAggregateBaseType(QualType Ty) const override;
4996 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
4997 uint64_t Members) const override;
4999 void computeInfo(CGFunctionInfo &FI) const override;
5001 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
5002 QualType Ty) const override;
5004 llvm::CallingConv::ID getLLVMDefaultCC() const;
5005 llvm::CallingConv::ID getABIDefaultCC() const;
5006 void setCCs();
5007 public:
5008 bool shouldPassIndirectlyForSwift(CharUnits totalSize,
5009 ArrayRef<llvm::Type*> scalars,
5010 bool asReturnValue) const override {
5011 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
5012 }
5013 };
5015 class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
5016 public:
5017 ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
5018 :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {}
5020 const ARMABIInfo &getABIInfo() const {
5021 return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo());
5022 }
5024 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
5025 return 13;
5026 }
5028 StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
5029 return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue";
5032 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
5033 llvm::Value *Address) const override {
5034 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
5036 // 0-15 are the 16 integer registers.
5037 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15);
5038 return false;
5039 }
5041 unsigned getSizeOfUnwindException() const override {
5042 if (getABIInfo().isEABI()) return 88;
5043 return TargetCodeGenInfo::getSizeOfUnwindException();
5044 }
5046 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5047 CodeGen::CodeGenModule &CGM) const override {
5048 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
5049 if (!FD)
5050 return;
5052 const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>();
5053 if (!Attr)
5054 return;
5056 const char *Kind;
5057 switch (Attr->getInterrupt()) {
5058 case ARMInterruptAttr::Generic: Kind = ""; break;
5059 case ARMInterruptAttr::IRQ: Kind = "IRQ"; break;
5060 case ARMInterruptAttr::FIQ: Kind = "FIQ"; break;
5061 case ARMInterruptAttr::SWI: Kind = "SWI"; break;
5062 case ARMInterruptAttr::ABORT: Kind = "ABORT"; break;
5063 case ARMInterruptAttr::UNDEF: Kind = "UNDEF"; break;
5064 }
5066 llvm::Function *Fn = cast<llvm::Function>(GV);
5068 Fn->addFnAttr("interrupt", Kind);
5070 ARMABIInfo::ABIKind ABI = cast<ARMABIInfo>(getABIInfo()).getABIKind();
5071 if (ABI == ARMABIInfo::APCS)
5072 return;
5074 // AAPCS guarantees that sp will be 8-byte aligned on any public interface,
5075 // however this is not necessarily true on taking any interrupt. Instruct
5076 // the backend to perform a realignment as part of the function prologue.
5077 llvm::AttrBuilder B;
5078 B.addStackAlignmentAttr(8);
5079 Fn->addAttributes(llvm::AttributeSet::FunctionIndex,
5080 llvm::AttributeSet::get(CGM.getLLVMContext(),
5081 llvm::AttributeSet::FunctionIndex,
5082 B));
5083 }
5084 };
5086 class WindowsARMTargetCodeGenInfo : public ARMTargetCodeGenInfo {
5087 public:
5088 WindowsARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
5089 : ARMTargetCodeGenInfo(CGT, K) {}
5091 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5092 CodeGen::CodeGenModule &CGM) const override;
5094 void getDependentLibraryOption(llvm::StringRef Lib,
5095 llvm::SmallString<24> &Opt) const override {
5096 Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
5099 void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
5100 llvm::SmallString<32> &Opt) const override {
5101 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
5105 void WindowsARMTargetCodeGenInfo::setTargetAttributes(
5106 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
5107 ARMTargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
5108 addStackProbeSizeTargetAttribute(D, GV, CGM);
5109 }
5112 void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
5113 if (!getCXXABI().classifyReturnType(FI))
5114 FI.getReturnInfo() =
5115 classifyReturnType(FI.getReturnType(), FI.isVariadic());
5117 for (auto &I : FI.arguments())
5118 I.info = classifyArgumentType(I.type, FI.isVariadic());
5120 // Always honor user-specified calling convention.
5121 if (FI.getCallingConvention() != llvm::CallingConv::C)
5122 return;
5124 llvm::CallingConv::ID cc = getRuntimeCC();
5125 if (cc != llvm::CallingConv::C)
5126 FI.setEffectiveCallingConvention(cc);
5127 }
5129 /// Return the default calling convention that LLVM will use.
5130 llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const {
5131 // The default calling convention that LLVM will infer.
5132 if (isEABIHF() || getTarget().getTriple().isWatchABI())
5133 return llvm::CallingConv::ARM_AAPCS_VFP;
5134 else if (isEABI())
5135 return llvm::CallingConv::ARM_AAPCS;
5136 else
5137 return llvm::CallingConv::ARM_APCS;
5140 /// Return the calling convention that our ABI would like us to use
5141 /// as the C calling convention.
5142 llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const {
5143 switch (getABIKind()) {
5144 case APCS: return llvm::CallingConv::ARM_APCS;
5145 case AAPCS: return llvm::CallingConv::ARM_AAPCS;
5146 case AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
5147 case AAPCS16_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
5149 llvm_unreachable("bad ABI kind");
5152 void ARMABIInfo::setCCs() {
5153 assert(getRuntimeCC() == llvm::CallingConv::C);
5155 // Don't muddy up the IR with a ton of explicit annotations if
5156 // they'd just match what LLVM will infer from the triple.
5157 llvm::CallingConv::ID abiCC = getABIDefaultCC();
5158 if (abiCC != getLLVMDefaultCC())
5159 RuntimeCC = abiCC;
5161 // AAPCS apparently requires runtime support functions to be soft-float, but
5162 // that's almost certainly for historical reasons (Thumb1 not supporting VFP
5163 // most likely). It's more convenient for AAPCS16_VFP to be hard-float.
5164 switch (getABIKind()) {
5165 case APCS:
5166 case AAPCS16_VFP:
5167 if (abiCC != getLLVMDefaultCC())
5168 BuiltinCC = abiCC;
5169 break;
5171 default:
5172 BuiltinCC = llvm::CallingConv::ARM_AAPCS;
5173 break;
5174 }
5177 ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
5178 bool isVariadic) const {
5179 // 6.1.2.1 The following argument types are VFP CPRCs:
5180 // A single-precision floating-point type (including promoted
5181 // half-precision types); A double-precision floating-point type;
5182 // A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate
5183 // with a Base Type of a single- or double-precision floating-point type,
5184 // 64-bit containerized vectors or 128-bit containerized vectors with one
5185 // to four Elements.
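// Illustrative sketch (not from the AAPCS text): under AAPCS-VFP,
//
//   struct Quad { float x, y, z, w; };   // hypothetical example type
//
// is a Homogeneous Aggregate with Base Type float and four members, so a
// non-variadic call passes it in s0-s3 rather than in core registers.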
5186 bool IsEffectivelyAAPCS_VFP = getABIKind() == AAPCS_VFP && !isVariadic;
5188 Ty = useFirstFieldIfTransparentUnion(Ty);
5190 // Handle illegal vector types here.
5191 if (isIllegalVectorType(Ty)) {
5192 uint64_t Size = getContext().getTypeSize(Ty);
5193 if (Size <= 32) {
5194 llvm::Type *ResType =
5195 llvm::Type::getInt32Ty(getVMContext());
5196 return ABIArgInfo::getDirect(ResType);
5197 }
5198 if (Size == 64) {
5199 llvm::Type *ResType = llvm::VectorType::get(
5200 llvm::Type::getInt32Ty(getVMContext()), 2);
5201 return ABIArgInfo::getDirect(ResType);
5202 }
5203 if (Size == 128) {
5204 llvm::Type *ResType = llvm::VectorType::get(
5205 llvm::Type::getInt32Ty(getVMContext()), 4);
5206 return ABIArgInfo::getDirect(ResType);
5207 }
5208 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
5209 }
5211 // __fp16 gets passed as if it were an int or float, but with the top 16 bits
5212 // unspecified. This is not done for OpenCL as it handles the half type
5213 // natively, and does not need to interwork with AAPCS code.
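// For example (an assumed sketch):
//
//   void f(__fp16 h);
//
// is lowered as f(float) when effectively AAPCS-VFP and as f(i32)
// otherwise, with only the low 16 bits of the slot carrying the value.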
5214 if (Ty->isHalfType() && !getContext().getLangOpts().NativeHalfArgsAndReturns) {
5215 llvm::Type *ResType = IsEffectivelyAAPCS_VFP ?
5216 llvm::Type::getFloatTy(getVMContext()) :
5217 llvm::Type::getInt32Ty(getVMContext());
5218 return ABIArgInfo::getDirect(ResType);
5221 if (!isAggregateTypeForABI(Ty)) {
5222 // Treat an enum type as its underlying type.
5223 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
5224 Ty = EnumTy->getDecl()->getIntegerType();
5227 return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend()
5228 : ABIArgInfo::getDirect());
5231 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
5232 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
5235 // Ignore empty records.
5236 if (isEmptyRecord(getContext(), Ty, true))
5237 return ABIArgInfo::getIgnore();
5239 if (IsEffectivelyAAPCS_VFP) {
5240 // Homogeneous Aggregates need to be expanded when we can fit the aggregate
5241 // into VFP registers.
5242 const Type *Base = nullptr;
5243 uint64_t Members = 0;
5244 if (isHomogeneousAggregate(Ty, Base, Members)) {
5245 assert(Base && "Base class should be set for homogeneous aggregate");
5246 // Base can be a floating-point or a vector.
5247 return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
5249 } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
5250 // WatchOS does have homogeneous aggregates. Note that we intentionally use
5251 // this convention even for a variadic function: the backend will use GPRs
5252 // if needed.
5253 const Type *Base = nullptr;
5254 uint64_t Members = 0;
5255 if (isHomogeneousAggregate(Ty, Base, Members)) {
5256 assert(Base && Members <= 4 && "unexpected homogeneous aggregate");
5257 llvm::Type *Ty =
5258 llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members);
5259 return ABIArgInfo::getDirect(Ty, 0, nullptr, false);
5263 if (getABIKind() == ARMABIInfo::AAPCS16_VFP &&
5264 getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(16)) {
5265 // WatchOS is adopting the 64-bit AAPCS rule on composite types: if they're
5266 // bigger than 128-bits, they get placed in space allocated by the caller,
5267 // and a pointer is passed.
5268 return ABIArgInfo::getIndirect(
5269 CharUnits::fromQuantity(getContext().getTypeAlign(Ty) / 8), false);
5272 // Support byval for ARM.
5273 // The ABI alignment for APCS is 4-byte and for AAPCS at least 4-byte and at
5274 // most 8-byte. We realign the indirect argument if type alignment is bigger
5275 // than ABI alignment.
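// Illustrative sketch (assumed): a hypothetical over-aligned type such as
//
//   struct __attribute__((aligned(16))) Big { char buf[80]; };
//
// has TyAlign == 16 while ABIAlign is capped at 8 under AAPCS, so the
// byval indirect argument below is marked for realignment.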
5276 uint64_t ABIAlign = 4;
5277 uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8;
5278 if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
5279 getABIKind() == ARMABIInfo::AAPCS)
5280 ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
5282 if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) {
5283 assert(getABIKind() != ARMABIInfo::AAPCS16_VFP && "unexpected byval");
5284 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
5285 /*ByVal=*/true,
5286 /*Realign=*/TyAlign > ABIAlign);
5289 // Otherwise, pass by coercing to a structure of the appropriate size.
5290 llvm::Type* ElemTy;
5291 unsigned SizeRegs;
5292 // FIXME: Try to match the types of the arguments more accurately where
5293 // possible.
5294 if (getContext().getTypeAlign(Ty) <= 32) {
5295 ElemTy = llvm::Type::getInt32Ty(getVMContext());
5296 SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
5298 ElemTy = llvm::Type::getInt64Ty(getVMContext());
5299 SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
5302 return ABIArgInfo::getDirect(llvm::ArrayType::get(ElemTy, SizeRegs));
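// e.g. (sketch): a 12-byte struct aligned to 4 bytes coerces to
// [3 x i32], while a 16-byte struct aligned to 8 bytes coerces to
// [2 x i64].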
5305 static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
5306 llvm::LLVMContext &VMContext) {
5307 // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
5308 // is called integer-like if its size is less than or equal to one word, and
5309 // the offset of each of its addressable sub-fields is zero.
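// Illustrative examples (assumed, not from the APCS text):
//
//   struct A { short s; };        // integer-like: one word, field at offset 0
//   union  U { int i; char c; };  // integer-like: both members at offset 0
//   struct B { char a, b; };      // not integer-like: b sits at offset 8 bits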
5311 uint64_t Size = Context.getTypeSize(Ty);
5313 // Check that the type fits in a word.
5314 if (Size > 64)
5315 return false;
5317 // FIXME: Handle vector types!
5318 if (Ty->isVectorType())
5319 return false;
5321 // Float types are never treated as "integer like".
5322 if (Ty->isRealFloatingType())
5323 return false;
5325 // If this is a builtin or pointer type then it is ok.
5326 if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
5327 return true;
5329 // Small complex integer types are "integer like".
5330 if (const ComplexType *CT = Ty->getAs<ComplexType>())
5331 return isIntegerLikeType(CT->getElementType(), Context, VMContext);
5333 // Single element and zero sized arrays should be allowed, by the definition
5334 // above, but they are not.
5336 // Otherwise, it must be a record type.
5337 const RecordType *RT = Ty->getAs<RecordType>();
5338 if (!RT) return false;
5340 // Ignore records with flexible arrays.
5341 const RecordDecl *RD = RT->getDecl();
5342 if (RD->hasFlexibleArrayMember())
5343 return false;
5345 // Check that all sub-fields are at offset 0, and are themselves "integer
5347 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
5349 bool HadField = false;
5350 unsigned idx = 0;
5351 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
5352 i != e; ++i, ++idx) {
5353 const FieldDecl *FD = *i;
5355 // Bit-fields are not addressable, we only need to verify they are "integer
5356 // like". We still have to disallow a subsequent non-bitfield, for example:
5357 // struct { int : 0; int x }
5358 // is non-integer like according to gcc.
5359 if (FD->isBitField()) {
5363 if (!isIntegerLikeType(FD->getType(), Context, VMContext))
5369 // Check if this field is at offset 0.
5370 if (Layout.getFieldOffset(idx) != 0)
5373 if (!isIntegerLikeType(FD->getType(), Context, VMContext))
5376 // Only allow at most one field in a structure. This doesn't match the
5377 // wording above, but follows gcc in situations with a field following an
5378 // empty bitfield.
5379 if (!RD->isUnion()) {
5390 ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
5391 bool isVariadic) const {
5392 bool IsEffectivelyAAPCS_VFP =
5393 (getABIKind() == AAPCS_VFP || getABIKind() == AAPCS16_VFP) && !isVariadic;
5395 if (RetTy->isVoidType())
5396 return ABIArgInfo::getIgnore();
5398 // Large vector types should be returned via memory.
5399 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) {
5400 return getNaturalAlignIndirect(RetTy);
5403 // __fp16 gets returned as if it were an int or float, but with the top 16
5404 // bits unspecified. This is not done for OpenCL as it handles the half type
5405 // natively, and does not need to interwork with AAPCS code.
5406 if (RetTy->isHalfType() && !getContext().getLangOpts().NativeHalfArgsAndReturns) {
5407 llvm::Type *ResType = IsEffectivelyAAPCS_VFP ?
5408 llvm::Type::getFloatTy(getVMContext()) :
5409 llvm::Type::getInt32Ty(getVMContext());
5410 return ABIArgInfo::getDirect(ResType);
5413 if (!isAggregateTypeForABI(RetTy)) {
5414 // Treat an enum type as its underlying type.
5415 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
5416 RetTy = EnumTy->getDecl()->getIntegerType();
5418 return RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend()
5419 : ABIArgInfo::getDirect();
5422 // Are we following APCS?
5423 if (getABIKind() == APCS) {
5424 if (isEmptyRecord(getContext(), RetTy, false))
5425 return ABIArgInfo::getIgnore();
5427 // Complex types are all returned as packed integers.
5429 // FIXME: Consider using 2 x vector types if the back end handles them
5431 if (RetTy->isAnyComplexType())
5432 return ABIArgInfo::getDirect(llvm::IntegerType::get(
5433 getVMContext(), getContext().getTypeSize(RetTy)));
5435 // Integer like structures are returned in r0.
5436 if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
5437 // Return in the smallest viable integer type.
5438 uint64_t Size = getContext().getTypeSize(RetTy);
5439 if (Size <= 8)
5440 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
5441 if (Size <= 16)
5442 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
5443 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
5446 // Otherwise return in memory.
5447 return getNaturalAlignIndirect(RetTy);
5450 // Otherwise this is an AAPCS variant.
5452 if (isEmptyRecord(getContext(), RetTy, true))
5453 return ABIArgInfo::getIgnore();
5455 // Check for homogeneous aggregates with AAPCS-VFP.
5456 if (IsEffectivelyAAPCS_VFP) {
5457 const Type *Base = nullptr;
5458 uint64_t Members = 0;
5459 if (isHomogeneousAggregate(RetTy, Base, Members)) {
5460 assert(Base && "Base class should be set for homogeneous aggregate");
5461 // Homogeneous Aggregates are returned directly.
5462 return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
5466 // Aggregates <= 4 bytes are returned in r0; other aggregates
5467 // are returned indirectly.
5468 uint64_t Size = getContext().getTypeSize(RetTy);
5469 if (Size <= 32) {
5470 if (getDataLayout().isBigEndian())
5471 // Return in a 32-bit integer type (as if loaded by an LDR; AAPCS 5.4)
5472 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
5474 // Return in the smallest viable integer type.
5475 if (Size <= 8)
5476 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
5477 if (Size <= 16)
5478 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
5479 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
5480 } else if (Size <= 128 && getABIKind() == AAPCS16_VFP) {
5481 llvm::Type *Int32Ty = llvm::Type::getInt32Ty(getVMContext());
5482 llvm::Type *CoerceTy =
5483 llvm::ArrayType::get(Int32Ty, llvm::alignTo(Size, 32) / 32);
5484 return ABIArgInfo::getDirect(CoerceTy);
5487 return getNaturalAlignIndirect(RetTy);
5490 /// isIllegalVectorType - check whether Ty is an illegal vector type.
5491 bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
5492 if (const VectorType *VT = Ty->getAs<VectorType>()) {
5493 if (isAndroid()) {
5494 // Android shipped using Clang 3.1, which supported a slightly different
5495 // vector ABI. The primary differences were that 3-element vector types
5496 // were legal, and so were sub-32-bit vectors (i.e. <2 x i8>). This path
5497 // accepts that legacy behavior for Android only.
5498 // Check whether VT is legal.
5499 unsigned NumElements = VT->getNumElements();
5500 // NumElements should be a power of 2 or equal to 3.
5501 if (!llvm::isPowerOf2_32(NumElements) && NumElements != 3)
5502 return true;
5503 } else {
5504 // Check whether VT is legal.
5505 unsigned NumElements = VT->getNumElements();
5506 uint64_t Size = getContext().getTypeSize(VT);
5507 // NumElements should be a power of 2.
5508 if (!llvm::isPowerOf2_32(NumElements))
5509 return true;
5510 // Size should be greater than 32 bits.
5511 return Size <= 32;
5512 }
5513 }
5514 return false;
5515 }
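// Illustrative sketch (assumed): a hypothetical
//
//   typedef char Char3 __attribute__((ext_vector_type(3)));   // <3 x i8>
//
// remains a legal, directly passed argument type on Android for legacy
// compatibility, but is classified as illegal elsewhere.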
5517 bool ARMABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
5518 // Homogeneous aggregates for AAPCS-VFP must have base types of float,
5519 // double, or 64-bit or 128-bit vectors.
5520 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
5521 if (BT->getKind() == BuiltinType::Float ||
5522 BT->getKind() == BuiltinType::Double ||
5523 BT->getKind() == BuiltinType::LongDouble)
5524 return true;
5525 } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
5526 unsigned VecSize = getContext().getTypeSize(VT);
5527 if (VecSize == 64 || VecSize == 128)
5528 return true;
5529 }
5530 return false;
5531 }
5533 bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
5534 uint64_t Members) const {
5535 return Members <= 4;
5538 Address ARMABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
5539 QualType Ty) const {
5540 CharUnits SlotSize = CharUnits::fromQuantity(4);
5542 // Empty records are ignored for parameter passing purposes.
5543 if (isEmptyRecord(getContext(), Ty, true)) {
5544 Address Addr(CGF.Builder.CreateLoad(VAListAddr), SlotSize);
5545 Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
5546 return Addr;
5547 }
5549 auto TyInfo = getContext().getTypeInfoInChars(Ty);
5550 CharUnits TyAlignForABI = TyInfo.second;
5552 // Use indirect if size of the illegal vector is bigger than 16 bytes.
5553 bool IsIndirect = false;
5554 const Type *Base = nullptr;
5555 uint64_t Members = 0;
5556 if (TyInfo.first > CharUnits::fromQuantity(16) && isIllegalVectorType(Ty)) {
5557 IsIndirect = true;
5559 // ARMv7k passes structs bigger than 16 bytes indirectly, in space
5560 // allocated by the caller.
5561 } else if (TyInfo.first > CharUnits::fromQuantity(16) &&
5562 getABIKind() == ARMABIInfo::AAPCS16_VFP &&
5563 !isHomogeneousAggregate(Ty, Base, Members)) {
5564 IsIndirect = true;
5566 // Otherwise, bound the type's ABI alignment.
5567 // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for
5568 // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte.
5569 // Our callers should be prepared to handle an under-aligned address.
5570 } else if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
5571 getABIKind() == ARMABIInfo::AAPCS) {
5572 TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
5573 TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(8));
5574 } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
5575 // ARMv7k allows type alignment up to 16 bytes.
5576 TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
5577 TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(16));
5578 } else {
5579 TyAlignForABI = CharUnits::fromQuantity(4);
5580 }
5581 TyInfo.second = TyAlignForABI;
5583 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo,
5584 SlotSize, /*AllowHigherAlign*/ true);
5587 //===----------------------------------------------------------------------===//
5588 // NVPTX ABI Implementation
5589 //===----------------------------------------------------------------------===//
5593 class NVPTXABIInfo : public ABIInfo {
5595 NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
5597 ABIArgInfo classifyReturnType(QualType RetTy) const;
5598 ABIArgInfo classifyArgumentType(QualType Ty) const;
5600 void computeInfo(CGFunctionInfo &FI) const override;
5601 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
5602 QualType Ty) const override;
5605 class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
5607 NVPTXTargetCodeGenInfo(CodeGenTypes &CGT)
5608 : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {}
5610 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5611 CodeGen::CodeGenModule &M) const override;
5613 // Adds a NamedMDNode with F, Name, and Operand as operands, and adds the
5614 // resulting MDNode to the nvvm.annotations MDNode.
5615 static void addNVVMMetadata(llvm::Function *F, StringRef Name, int Operand);
5618 ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
5619 if (RetTy->isVoidType())
5620 return ABIArgInfo::getIgnore();
5622 // Note: this is different from the default ABI.
5623 if (!RetTy->isScalarType())
5624 return ABIArgInfo::getDirect();
5626 // Treat an enum type as its underlying type.
5627 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
5628 RetTy = EnumTy->getDecl()->getIntegerType();
5630 return (RetTy->isPromotableIntegerType() ?
5631 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
5634 ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
5635 // Treat an enum type as its underlying type.
5636 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
5637 Ty = EnumTy->getDecl()->getIntegerType();
5639 // Pass aggregate types indirectly, by value.
5640 if (isAggregateTypeForABI(Ty))
5641 return getNaturalAlignIndirect(Ty, /* byval */ true);
5643 return (Ty->isPromotableIntegerType() ?
5644 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
5647 void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
5648 if (!getCXXABI().classifyReturnType(FI))
5649 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
5650 for (auto &I : FI.arguments())
5651 I.info = classifyArgumentType(I.type);
5653 // Always honor user-specified calling convention.
5654 if (FI.getCallingConvention() != llvm::CallingConv::C)
5655 return;
5657 FI.setEffectiveCallingConvention(getRuntimeCC());
5660 Address NVPTXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
5661 QualType Ty) const {
5662 llvm_unreachable("NVPTX does not support varargs");
5665 void NVPTXTargetCodeGenInfo::
5666 setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5667 CodeGen::CodeGenModule &M) const {
5668 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
5669 if (!FD) return;
5671 llvm::Function *F = cast<llvm::Function>(GV);
5673 // Perform special handling in OpenCL mode
5674 if (M.getLangOpts().OpenCL) {
5675 // Use OpenCL function attributes to check for kernel functions
5676 // By default, all functions are device functions
5677 if (FD->hasAttr<OpenCLKernelAttr>()) {
5678 // OpenCL __kernel functions get kernel metadata
5679 // Create !{<func-ref>, metadata !"kernel", i32 1} node
5680 addNVVMMetadata(F, "kernel", 1);
5681 // And kernel functions are not subject to inlining
5682 F->addFnAttr(llvm::Attribute::NoInline);
5686 // Perform special handling in CUDA mode.
5687 if (M.getLangOpts().CUDA) {
5688 // CUDA __global__ functions get a kernel metadata entry. Since
5689 // __global__ functions cannot be called from the device, we do not
5690 // need to set the noinline attribute.
5691 if (FD->hasAttr<CUDAGlobalAttr>()) {
5692 // Create !{<func-ref>, metadata !"kernel", i32 1} node
5693 addNVVMMetadata(F, "kernel", 1);
5695 if (CUDALaunchBoundsAttr *Attr = FD->getAttr<CUDALaunchBoundsAttr>()) {
5696 // Create !{<func-ref>, metadata !"maxntidx", i32 <val>} node
5697 llvm::APSInt MaxThreads(32);
5698 MaxThreads = Attr->getMaxThreads()->EvaluateKnownConstInt(M.getContext());
5700 addNVVMMetadata(F, "maxntidx", MaxThreads.getExtValue());
5702 // min blocks is an optional argument for CUDALaunchBoundsAttr. If it was
5703 // not specified in __launch_bounds__ or if the user specified a 0 value,
5704 // we don't have to add a PTX directive.
5705 if (Attr->getMinBlocks()) {
5706 llvm::APSInt MinBlocks(32);
5707 MinBlocks = Attr->getMinBlocks()->EvaluateKnownConstInt(M.getContext());
5708 if (MinBlocks > 0)
5709 // Create !{<func-ref>, metadata !"minctasm", i32 <val>} node
5710 addNVVMMetadata(F, "minctasm", MinBlocks.getExtValue());
5716 void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::Function *F, StringRef Name,
5717 int Operand) {
5718 llvm::Module *M = F->getParent();
5719 llvm::LLVMContext &Ctx = M->getContext();
5721 // Get "nvvm.annotations" metadata node
5722 llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations");
5724 llvm::Metadata *MDVals[] = {
5725 llvm::ConstantAsMetadata::get(F), llvm::MDString::get(Ctx, Name),
5726 llvm::ConstantAsMetadata::get(
5727 llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))};
5728 // Append metadata to nvvm.annotations
5729 MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
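// The emitted module-level metadata then looks like (sketch):
//
//   !nvvm.annotations = !{!0}
//   !0 = !{void ()* @my_kernel, !"kernel", i32 1}
//
// where @my_kernel stands in for a hypothetical annotated function.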
5733 //===----------------------------------------------------------------------===//
5734 // SystemZ ABI Implementation
5735 //===----------------------------------------------------------------------===//
5739 class SystemZABIInfo : public SwiftABIInfo {
5740 bool HasVector;
5742 public:
5743 SystemZABIInfo(CodeGenTypes &CGT, bool HV)
5744 : SwiftABIInfo(CGT), HasVector(HV) {}
5746 bool isPromotableIntegerType(QualType Ty) const;
5747 bool isCompoundType(QualType Ty) const;
5748 bool isVectorArgumentType(QualType Ty) const;
5749 bool isFPArgumentType(QualType Ty) const;
5750 QualType GetSingleElementType(QualType Ty) const;
5752 ABIArgInfo classifyReturnType(QualType RetTy) const;
5753 ABIArgInfo classifyArgumentType(QualType ArgTy) const;
5755 void computeInfo(CGFunctionInfo &FI) const override {
5756 if (!getCXXABI().classifyReturnType(FI))
5757 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
5758 for (auto &I : FI.arguments())
5759 I.info = classifyArgumentType(I.type);
5762 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
5763 QualType Ty) const override;
5765 bool shouldPassIndirectlyForSwift(CharUnits totalSize,
5766 ArrayRef<llvm::Type*> scalars,
5767 bool asReturnValue) const override {
5768 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
5772 class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
5774 SystemZTargetCodeGenInfo(CodeGenTypes &CGT, bool HasVector)
5775 : TargetCodeGenInfo(new SystemZABIInfo(CGT, HasVector)) {}
5780 bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const {
5781 // Treat an enum type as its underlying type.
5782 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
5783 Ty = EnumTy->getDecl()->getIntegerType();
5785 // Promotable integer types are required to be promoted by the ABI.
5786 if (Ty->isPromotableIntegerType())
5787 return true;
5789 // 32-bit values must also be promoted.
5790 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
5791 switch (BT->getKind()) {
5792 case BuiltinType::Int:
5793 case BuiltinType::UInt:
5794 return true;
5795 default:
5796 return false;
5797 }
5798 return false;
5801 bool SystemZABIInfo::isCompoundType(QualType Ty) const {
5802 return (Ty->isAnyComplexType() ||
5803 Ty->isVectorType() ||
5804 isAggregateTypeForABI(Ty));
5807 bool SystemZABIInfo::isVectorArgumentType(QualType Ty) const {
5808 return (HasVector &&
5809 Ty->isVectorType() &&
5810 getContext().getTypeSize(Ty) <= 128);
5813 bool SystemZABIInfo::isFPArgumentType(QualType Ty) const {
5814 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
5815 switch (BT->getKind()) {
5816 case BuiltinType::Float:
5817 case BuiltinType::Double:
5818 return true;
5819 default:
5820 return false;
5821 }
5822 return false;
5823 }
5826 QualType SystemZABIInfo::GetSingleElementType(QualType Ty) const {
5827 if (const RecordType *RT = Ty->getAsStructureType()) {
5828 const RecordDecl *RD = RT->getDecl();
5829 QualType Found;
5831 // If this is a C++ record, check the bases first.
5832 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
5833 for (const auto &I : CXXRD->bases()) {
5834 QualType Base = I.getType();
5836 // Empty bases don't affect things either way.
5837 if (isEmptyRecord(getContext(), Base, true))
5838 continue;
5840 if (!Found.isNull())
5841 return Ty;
5842 Found = GetSingleElementType(Base);
5845 // Check the fields.
5846 for (const auto *FD : RD->fields()) {
5847 // For compatibility with GCC, ignore empty bitfields in C++ mode.
5848 // Unlike isSingleElementStruct(), empty structure and array fields
5849 // do count. So do anonymous bitfields that aren't zero-sized.
5850 if (getContext().getLangOpts().CPlusPlus &&
5851 FD->isBitField() && FD->getBitWidthValue(getContext()) == 0)
5852 continue;
5854 // Unlike isSingleElementStruct(), arrays do not count.
5855 // Nested structures still do though.
5856 if (!Found.isNull())
5857 return Ty;
5858 Found = GetSingleElementType(FD->getType());
5861 // Unlike isSingleElementStruct(), trailing padding is allowed.
5862 // An 8-byte aligned struct s { float f; } is passed as a double.
5863 if (!Found.isNull())
5864 return Found;
5865 }
5867 return Ty;
5868 }
5870 Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
5871 QualType Ty) const {
5872 // Assume that va_list type is correct; should be pointer to LLVM type:
5873 // struct {
5874 //   i64 __gpr;
5875 //   i64 __fpr;
5876 //   i8 *__overflow_arg_area;
5877 //   i8 *__reg_save_area;
5878 // };
5880 // Every non-vector argument occupies 8 bytes and is passed by preference
5881 // in either GPRs or FPRs. Vector arguments occupy 8 or 16 bytes and are
5882 // always passed on the stack.
5883 Ty = getContext().getCanonicalType(Ty);
5884 auto TyInfo = getContext().getTypeInfoInChars(Ty);
5885 llvm::Type *ArgTy = CGF.ConvertTypeForMem(Ty);
5886 llvm::Type *DirectTy = ArgTy;
5887 ABIArgInfo AI = classifyArgumentType(Ty);
5888 bool IsIndirect = AI.isIndirect();
5889 bool InFPRs = false;
5890 bool IsVector = false;
5891 CharUnits UnpaddedSize;
5892 CharUnits DirectAlign;
5893 if (IsIndirect) {
5894 DirectTy = llvm::PointerType::getUnqual(DirectTy);
5895 UnpaddedSize = DirectAlign = CharUnits::fromQuantity(8);
5896 } else {
5897 if (AI.getCoerceToType())
5898 ArgTy = AI.getCoerceToType();
5899 InFPRs = ArgTy->isFloatTy() || ArgTy->isDoubleTy();
5900 IsVector = ArgTy->isVectorTy();
5901 UnpaddedSize = TyInfo.first;
5902 DirectAlign = TyInfo.second;
5904 CharUnits PaddedSize = CharUnits::fromQuantity(8);
5905 if (IsVector && UnpaddedSize > PaddedSize)
5906 PaddedSize = CharUnits::fromQuantity(16);
5907 assert((UnpaddedSize <= PaddedSize) && "Invalid argument size.");
5909 CharUnits Padding = (PaddedSize - UnpaddedSize);
5911 llvm::Type *IndexTy = CGF.Int64Ty;
5912 llvm::Value *PaddedSizeV =
5913 llvm::ConstantInt::get(IndexTy, PaddedSize.getQuantity());
5915 if (IsVector) {
5916 // Work out the address of a vector argument on the stack.
5917 // Vector arguments are always passed in the high bits of a
5918 // single (8 byte) or double (16 byte) stack slot.
5919 Address OverflowArgAreaPtr =
5920 CGF.Builder.CreateStructGEP(VAListAddr, 2, CharUnits::fromQuantity(16),
5921 "overflow_arg_area_ptr");
5922 Address OverflowArgArea =
5923 Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
5924 TyInfo.second);
5925 Address MemAddr =
5926 CGF.Builder.CreateElementBitCast(OverflowArgArea, DirectTy, "mem_addr");
5928 // Update overflow_arg_area_ptr pointer
5929 llvm::Value *NewOverflowArgArea =
5930 CGF.Builder.CreateGEP(OverflowArgArea.getPointer(), PaddedSizeV,
5931 "overflow_arg_area");
5932 CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
5933 return MemAddr;
5934 }
5937 assert(PaddedSize.getQuantity() == 8);
5939 unsigned MaxRegs, RegCountField, RegSaveIndex;
5940 CharUnits RegPadding;
5941 if (InFPRs) {
5942 MaxRegs = 4; // Maximum of 4 FPR arguments
5943 RegCountField = 1; // __fpr
5944 RegSaveIndex = 16; // save offset for f0
5945 RegPadding = CharUnits(); // floats are passed in the high bits of an FPR
5946 } else {
5947 MaxRegs = 5; // Maximum of 5 GPR arguments
5948 RegCountField = 0; // __gpr
5949 RegSaveIndex = 2; // save offset for r2
5950 RegPadding = Padding; // values are passed in the low bits of a GPR
5951 }
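// Sketch of the register save area layout assumed above: with PaddedSize
// of 8, the first GPR argument register r2 lives at offset 2 * 8 == 16,
// and the first FPR argument register f0 at offset 16 * 8 == 128.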
5953 Address RegCountPtr = CGF.Builder.CreateStructGEP(
5954 VAListAddr, RegCountField, RegCountField * CharUnits::fromQuantity(8),
5955 "reg_count_ptr");
5956 llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count");
5957 llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
5958 llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV,
5959 "fits_in_regs");
5961 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
5962 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
5963 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
5964 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
5966 // Emit code to load the value if it was passed in registers.
5967 CGF.EmitBlock(InRegBlock);
5969 // Work out the address of an argument register.
5970 llvm::Value *ScaledRegCount =
5971 CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count");
5972 llvm::Value *RegBase =
5973 llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize.getQuantity()
5974 + RegPadding.getQuantity());
5975 llvm::Value *RegOffset =
5976 CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset");
5977 Address RegSaveAreaPtr =
5978 CGF.Builder.CreateStructGEP(VAListAddr, 3, CharUnits::fromQuantity(24),
5979 "reg_save_area_ptr");
5980 llvm::Value *RegSaveArea =
5981 CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area");
5982 Address RawRegAddr(CGF.Builder.CreateGEP(RegSaveArea, RegOffset,
5983 "raw_reg_addr"),
5984 PaddedSize);
5985 Address RegAddr =
5986 CGF.Builder.CreateElementBitCast(RawRegAddr, DirectTy, "reg_addr");
5988 // Update the register count
5989 llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1);
5990 llvm::Value *NewRegCount =
5991 CGF.Builder.CreateAdd(RegCount, One, "reg_count");
5992 CGF.Builder.CreateStore(NewRegCount, RegCountPtr);
5993 CGF.EmitBranch(ContBlock);
5995 // Emit code to load the value if it was passed in memory.
5996 CGF.EmitBlock(InMemBlock);
5998 // Work out the address of a stack argument.
5999 Address OverflowArgAreaPtr = CGF.Builder.CreateStructGEP(
6000 VAListAddr, 2, CharUnits::fromQuantity(16), "overflow_arg_area_ptr");
6001 Address OverflowArgArea =
6002 Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
6003 TyInfo.second);
6004 Address RawMemAddr =
6005 CGF.Builder.CreateConstByteGEP(OverflowArgArea, Padding, "raw_mem_addr");
6006 Address MemAddr =
6007 CGF.Builder.CreateElementBitCast(RawMemAddr, DirectTy, "mem_addr");
6009 // Update overflow_arg_area_ptr pointer
6010 llvm::Value *NewOverflowArgArea =
6011 CGF.Builder.CreateGEP(OverflowArgArea.getPointer(), PaddedSizeV,
6012 "overflow_arg_area");
6013 CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
6014 CGF.EmitBranch(ContBlock);
6016 // Return the appropriate result.
6017 CGF.EmitBlock(ContBlock);
6018 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock,
6019 MemAddr, InMemBlock, "va_arg.addr");
6021 if (IsIndirect)
6022 ResAddr = Address(CGF.Builder.CreateLoad(ResAddr, "indirect_arg"),
6023 TyInfo.second);
6025 return ResAddr;
6026 }
6028 ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
6029 if (RetTy->isVoidType())
6030 return ABIArgInfo::getIgnore();
6031 if (isVectorArgumentType(RetTy))
6032 return ABIArgInfo::getDirect();
6033 if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64)
6034 return getNaturalAlignIndirect(RetTy);
6035 return (isPromotableIntegerType(RetTy) ?
6036 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
6039 ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
6040 // Handle the generic C++ ABI.
6041 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
6042 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
6044 // Integers and enums are extended to full register width.
6045 if (isPromotableIntegerType(Ty))
6046 return ABIArgInfo::getExtend();
6048 // Handle vector types and vector-like structure types. Note that
6049 // as opposed to float-like structure types, we do not allow any
6050 // padding for vector-like structures, so verify the sizes match.
6051 uint64_t Size = getContext().getTypeSize(Ty);
6052 QualType SingleElementTy = GetSingleElementType(Ty);
6053 if (isVectorArgumentType(SingleElementTy) &&
6054 getContext().getTypeSize(SingleElementTy) == Size)
6055 return ABIArgInfo::getDirect(CGT.ConvertType(SingleElementTy));
6057 // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly.
6058 if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
6059 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
6061 // Handle small structures.
6062 if (const RecordType *RT = Ty->getAs<RecordType>()) {
6063 // Structures with flexible arrays have variable length, so they really
6064 // fail the size test above.
6065 const RecordDecl *RD = RT->getDecl();
6066 if (RD->hasFlexibleArrayMember())
6067 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
6069 // The structure is passed as an unextended integer, a float, or a double.
6070 llvm::Type *PassTy;
6071 if (isFPArgumentType(SingleElementTy)) {
6072 assert(Size == 32 || Size == 64);
6073 if (Size == 32)
6074 PassTy = llvm::Type::getFloatTy(getVMContext());
6075 else
6076 PassTy = llvm::Type::getDoubleTy(getVMContext());
6077 } else
6078 PassTy = llvm::IntegerType::get(getVMContext(), Size);
6079 return ABIArgInfo::getDirect(PassTy);
6082 // Non-structure compounds are passed indirectly.
6083 if (isCompoundType(Ty))
6084 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
6086 return ABIArgInfo::getDirect(nullptr);
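// Illustrative results (assumed, not from this file): struct { float f; }
// is passed as a float and struct { double d; } as a double, while a
// 3-byte struct { char c[3]; } has a 24-bit size that is not 1, 2, 4 or
// 8 bytes and is therefore passed indirectly.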
6089 //===----------------------------------------------------------------------===//
6090 // MSP430 ABI Implementation
6091 //===----------------------------------------------------------------------===//
6095 class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
6097 MSP430TargetCodeGenInfo(CodeGenTypes &CGT)
6098 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
6099 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
6100 CodeGen::CodeGenModule &M) const override;
6105 void MSP430TargetCodeGenInfo::setTargetAttributes(const Decl *D,
6106 llvm::GlobalValue *GV,
6107 CodeGen::CodeGenModule &M) const {
6108 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
6109 if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) {
6110 // Handle 'interrupt' attribute:
6111 llvm::Function *F = cast<llvm::Function>(GV);
6113 // Step 1: Set ISR calling convention.
6114 F->setCallingConv(llvm::CallingConv::MSP430_INTR);
6116 // Step 2: Add attributes goodness.
6117 F->addFnAttr(llvm::Attribute::NoInline);
6119 // Step 3: Emit ISR vector alias.
6120 unsigned Num = attr->getNumber() / 2;
6121 llvm::GlobalAlias::create(llvm::Function::ExternalLinkage,
6122 "__isr_" + Twine(Num), F);
6127 //===----------------------------------------------------------------------===//
6128 // MIPS ABI Implementation. This works for both little-endian and
6129 // big-endian variants.
6130 //===----------------------------------------------------------------------===//
6133 class MipsABIInfo : public ABIInfo {
6134 bool IsO32;
6135 unsigned MinABIStackAlignInBytes, StackAlignInBytes;
6136 void CoerceToIntArgs(uint64_t TySize,
6137 SmallVectorImpl<llvm::Type *> &ArgList) const;
6138 llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const;
6139 llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const;
6140 llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const;
6142 MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) :
6143 ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
6144 StackAlignInBytes(IsO32 ? 8 : 16) {}
6146 ABIArgInfo classifyReturnType(QualType RetTy) const;
6147 ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const;
6148 void computeInfo(CGFunctionInfo &FI) const override;
6149 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6150 QualType Ty) const override;
6151 bool shouldSignExtUnsignedType(QualType Ty) const override;
6154 class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
6155 unsigned SizeOfUnwindException;
6157 MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32)
6158 : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)),
6159 SizeOfUnwindException(IsO32 ? 24 : 32) {}
6161 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
6162 return 29;
6163 }
6165 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
6166 CodeGen::CodeGenModule &CGM) const override {
6167 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
6168 if (!FD) return;
6169 llvm::Function *Fn = cast<llvm::Function>(GV);
6170 if (FD->hasAttr<Mips16Attr>()) {
6171 Fn->addFnAttr("mips16");
6173 else if (FD->hasAttr<NoMips16Attr>()) {
6174 Fn->addFnAttr("nomips16");
6177 const MipsInterruptAttr *Attr = FD->getAttr<MipsInterruptAttr>();
6178 if (!Attr)
6179 return;
6181 const char *Kind;
6182 switch (Attr->getInterrupt()) {
6183 case MipsInterruptAttr::eic: Kind = "eic"; break;
6184 case MipsInterruptAttr::sw0: Kind = "sw0"; break;
6185 case MipsInterruptAttr::sw1: Kind = "sw1"; break;
6186 case MipsInterruptAttr::hw0: Kind = "hw0"; break;
6187 case MipsInterruptAttr::hw1: Kind = "hw1"; break;
6188 case MipsInterruptAttr::hw2: Kind = "hw2"; break;
6189 case MipsInterruptAttr::hw3: Kind = "hw3"; break;
6190 case MipsInterruptAttr::hw4: Kind = "hw4"; break;
6191 case MipsInterruptAttr::hw5: Kind = "hw5"; break;
6194 Fn->addFnAttr("interrupt", Kind);
6198 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
6199 llvm::Value *Address) const override;
6201 unsigned getSizeOfUnwindException() const override {
6202 return SizeOfUnwindException;
6207 void MipsABIInfo::CoerceToIntArgs(
6208 uint64_t TySize, SmallVectorImpl<llvm::Type *> &ArgList) const {
6209 llvm::IntegerType *IntTy =
6210 llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);
6212 // Add (TySize / MinABIStackAlignInBytes) args of IntTy.
6213 for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
6214 ArgList.push_back(IntTy);
6216 // If necessary, add one more integer type to ArgList.
6217 unsigned R = TySize % (MinABIStackAlignInBytes * 8);
6219 if (R)
6220 ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
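// e.g. (sketch): on O32, MinABIStackAlignInBytes is 4, so IntTy is i32 and
// a 72-bit aggregate yields the list [i32, i32, i8]: two full 32-bit slots
// plus an i8 for the remaining 8 bits.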
6223 // In N32/64, an aligned double precision floating point field is passed in
6224 // a register.
6225 llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const {
6226 SmallVector<llvm::Type*, 8> ArgList, IntArgList;
6228 if (IsO32) {
6229 CoerceToIntArgs(TySize, ArgList);
6230 return llvm::StructType::get(getVMContext(), ArgList);
6231 }
6233 if (Ty->isComplexType())
6234 return CGT.ConvertType(Ty);
6236 const RecordType *RT = Ty->getAs<RecordType>();
6238 // Unions/vectors are passed in integer registers.
6239 if (!RT || !RT->isStructureOrClassType()) {
6240 CoerceToIntArgs(TySize, ArgList);
6241 return llvm::StructType::get(getVMContext(), ArgList);
6244 const RecordDecl *RD = RT->getDecl();
6245 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
6246 assert(!(TySize % 8) && "Size of structure must be multiple of 8.");
6248 uint64_t LastOffset = 0;
6249 unsigned idx = 0;
6250 llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64);
6252 // Iterate over fields in the struct/class and check if there are any aligned
6254 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
6255 i != e; ++i, ++idx) {
6256 const QualType Ty = i->getType();
6257 const BuiltinType *BT = Ty->getAs<BuiltinType>();
6259 if (!BT || BT->getKind() != BuiltinType::Double)
6260 continue;
6262 uint64_t Offset = Layout.getFieldOffset(idx);
6263 if (Offset % 64) // Ignore doubles that are not aligned.
6264 continue;
6266 // Add ((Offset - LastOffset) / 64) args of type i64.
6267 for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
6268 ArgList.push_back(I64);
6271 ArgList.push_back(llvm::Type::getDoubleTy(getVMContext()));
6272 LastOffset = Offset + 64;
6275 CoerceToIntArgs(TySize - LastOffset, IntArgList);
6276 ArgList.append(IntArgList.begin(), IntArgList.end());
6278 return llvm::StructType::get(getVMContext(), ArgList);
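// e.g. (N32/N64 sketch): struct { double d; int i; } is 128 bits with the
// double aligned at offset 0, so this returns { double, i64 } and the
// double can travel in an FPR.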
6281 llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset,
6282 uint64_t Offset) const {
6283 if (OrigOffset + MinABIStackAlignInBytes > Offset)
6284 return nullptr;
6286 return llvm::IntegerType::get(getVMContext(), (Offset - OrigOffset) * 8);
6290 MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
6291 Ty = useFirstFieldIfTransparentUnion(Ty);
6293 uint64_t OrigOffset = Offset;
6294 uint64_t TySize = getContext().getTypeSize(Ty);
6295 uint64_t Align = getContext().getTypeAlign(Ty) / 8;
6297 Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes),
6298 (uint64_t)StackAlignInBytes);
6299 unsigned CurrOffset = llvm::alignTo(Offset, Align);
6300 Offset = CurrOffset + llvm::alignTo(TySize, Align * 8) / 8;
6302 if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) {
6303 // Ignore empty aggregates.
6304 if (TySize == 0)
6305 return ABIArgInfo::getIgnore();
6307 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
6308 Offset = OrigOffset + MinABIStackAlignInBytes;
6309 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
6312 // If we have reached here, aggregates are passed directly by coercing to
6313 // another structure type. Padding is inserted if the offset of the
6314 // aggregate is unaligned.
6315 ABIArgInfo ArgInfo =
6316 ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0,
6317 getPaddingType(OrigOffset, CurrOffset));
6318 ArgInfo.setInReg(true);
6319 return ArgInfo;
6320 }
6322 // Treat an enum type as its underlying type.
6323 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
6324 Ty = EnumTy->getDecl()->getIntegerType();
6326 // All integral types are promoted to the GPR width.
6327 if (Ty->isIntegralOrEnumerationType())
6328 return ABIArgInfo::getExtend();
6330 return ABIArgInfo::getDirect(
6331 nullptr, 0, IsO32 ? nullptr : getPaddingType(OrigOffset, CurrOffset));
6335 MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
6336 const RecordType *RT = RetTy->getAs<RecordType>();
6337 SmallVector<llvm::Type*, 8> RTList;
6339 if (RT && RT->isStructureOrClassType()) {
6340 const RecordDecl *RD = RT->getDecl();
6341 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
6342 unsigned FieldCnt = Layout.getFieldCount();
6344 // N32/64 returns struct/classes in floating point registers if the
6345 // following conditions are met:
6346 // 1. The size of the struct/class is no larger than 128-bit.
6347 // 2. The struct/class has one or two fields all of which are floating
6349 // 3. The offset of the first field is zero (this follows what gcc does).
6351 // Any other composite results are returned in integer registers.
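// e.g. (an assumed sketch): struct { float f; double d; } satisfies all
// three conditions and is returned as { float, double } in FPRs, whereas
// struct { int i; } has a non-floating field and falls through to the
// integer-register path below.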
6353 if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) {
6354 RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end();
6355 for (; b != e; ++b) {
6356 const BuiltinType *BT = b->getType()->getAs<BuiltinType>();
6358 if (!BT || !BT->isFloatingPoint())
6359 break;
6361 RTList.push_back(CGT.ConvertType(b->getType()));
6365 return llvm::StructType::get(getVMContext(), RTList,
6366 RD->hasAttr<PackedAttr>());
6372 CoerceToIntArgs(Size, RTList);
6373 return llvm::StructType::get(getVMContext(), RTList);
6376 ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
6377 uint64_t Size = getContext().getTypeSize(RetTy);
6379 if (RetTy->isVoidType())
6380 return ABIArgInfo::getIgnore();
6382 // O32 doesn't treat zero-sized structs differently from other structs.
6383 // However, N32/N64 ignores zero sized return values.
6384 if (!IsO32 && Size == 0)
6385 return ABIArgInfo::getIgnore();
6387 if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) {
6389 if (RetTy->isAnyComplexType())
6390 return ABIArgInfo::getDirect();
6392 // O32 returns integer vectors in registers and N32/N64 returns all small
6393 // aggregates in registers.
6394 if (!IsO32 ||
6395 (RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())) {
6396 ABIArgInfo ArgInfo =
6397 ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
6398 ArgInfo.setInReg(true);
6399 return ArgInfo;
6400 }
6403 return getNaturalAlignIndirect(RetTy);
6406 // Treat an enum type as its underlying type.
6407 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
6408 RetTy = EnumTy->getDecl()->getIntegerType();
6410 return (RetTy->isPromotableIntegerType() ?
6411 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
6414 void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
6415 ABIArgInfo &RetInfo = FI.getReturnInfo();
6416 if (!getCXXABI().classifyReturnType(FI))
6417 RetInfo = classifyReturnType(FI.getReturnType());
6419 // Check if a pointer to an aggregate is passed as a hidden argument.
6420 uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0;
6422 for (auto &I : FI.arguments())
6423 I.info = classifyArgumentType(I.type, Offset);
6426 Address MipsABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6427 QualType OrigTy) const {
6428 QualType Ty = OrigTy;
6430 // Integer arguments are promoted to 32-bit on O32 and 64-bit on N32/N64.
6431 // Pointers are also promoted in the same way but this only matters for N32.
6432 unsigned SlotSizeInBits = IsO32 ? 32 : 64;
6433 unsigned PtrWidth = getTarget().getPointerWidth(0);
6434 bool DidPromote = false;
6435 if ((Ty->isIntegerType() &&
6436 getContext().getIntWidth(Ty) < SlotSizeInBits) ||
6437 (Ty->isPointerType() && PtrWidth < SlotSizeInBits)) {
6438 DidPromote = true;
6439 Ty = getContext().getIntTypeForBitwidth(SlotSizeInBits,
6440 Ty->isSignedIntegerType());
6443 auto TyInfo = getContext().getTypeInfoInChars(Ty);
6445 // The alignment of things in the argument area is never larger than
6446 // StackAlignInBytes.
6447 TyInfo.second =
6448 std::min(TyInfo.second, CharUnits::fromQuantity(StackAlignInBytes));
6450 // MinABIStackAlignInBytes is the size of argument slots on the stack.
6451 CharUnits ArgSlotSize = CharUnits::fromQuantity(MinABIStackAlignInBytes);
6453 Address Addr = emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
6454 TyInfo, ArgSlotSize, /*AllowHigherAlign*/ true);
6457 // If there was a promotion, "unpromote" into a temporary.
6458 // TODO: can we just use a pointer into a subset of the original slot?
6459 if (DidPromote) {
6460 Address Temp = CGF.CreateMemTemp(OrigTy, "vaarg.promotion-temp");
6461 llvm::Value *Promoted = CGF.Builder.CreateLoad(Addr);
6463 // Truncate down to the right width.
6464 llvm::Type *IntTy = (OrigTy->isIntegerType() ? Temp.getElementType()
6465 : CGF.IntPtrTy);
6466 llvm::Value *V = CGF.Builder.CreateTrunc(Promoted, IntTy);
6467 if (OrigTy->isPointerType())
6468 V = CGF.Builder.CreateIntToPtr(V, Temp.getElementType());
6470 CGF.Builder.CreateStore(V, Temp);
6471 Addr = Temp;
6472 }
6474 return Addr;
6475 }
6477 bool MipsABIInfo::shouldSignExtUnsignedType(QualType Ty) const {
6478 int TySize = getContext().getTypeSize(Ty);
6480 // MIPS64 ABI requires unsigned 32 bit integers to be sign extended.
6481 if (Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
6482 return true;
6484 return false;
6485 }
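// e.g. (assumed): passing 0x80000000u as a 32-bit unsigned value on N64
// materializes 0xffffffff80000000 in the 64-bit argument register,
// matching the MIPS64 rule of sign-extending 32-bit values.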
6488 MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
6489 llvm::Value *Address) const {
6490 // This information comes from gcc's implementation, which seems to be
6491 // as canonical as it gets.
6493 // Everything on MIPS is 4 bytes. Double-precision FP registers
6494 // are aliased to pairs of single-precision FP registers.
6495 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
6497 // 0-31 are the general purpose registers, $0 - $31.
6498 // 32-63 are the floating-point registers, $f0 - $f31.
6499 // 64 and 65 are the multiply/divide registers, $hi and $lo.
6500 // 66 is the (notional, I think) register for signal-handler return.
6501 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);
6503 // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
6504 // They are one bit wide and ignored here.
6506 // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
6507 // (coprocessor 1 is the FP unit)
6508 // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
6509 // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
6510 // 176-181 are the DSP accumulator registers.
6511 AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);
6515 //===----------------------------------------------------------------------===//
6516 // TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
6517 // Currently subclassed only to implement custom OpenCL C function attribute
6519 //===----------------------------------------------------------------------===//
6523 class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo {
6525 TCETargetCodeGenInfo(CodeGenTypes &CGT)
6526 : DefaultTargetCodeGenInfo(CGT) {}
6528 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
6529 CodeGen::CodeGenModule &M) const override;
6532 void TCETargetCodeGenInfo::setTargetAttributes(
6533 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
6534 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
6535 if (!FD) return;
6537 llvm::Function *F = cast<llvm::Function>(GV);
6539 if (M.getLangOpts().OpenCL) {
6540 if (FD->hasAttr<OpenCLKernelAttr>()) {
6541 // OpenCL C Kernel functions are not subject to inlining
6542 F->addFnAttr(llvm::Attribute::NoInline);
6543 const ReqdWorkGroupSizeAttr *Attr = FD->getAttr<ReqdWorkGroupSizeAttr>();
6544 if (Attr) {
6545 // Convert the reqd_work_group_size() attributes to metadata.
6546 llvm::LLVMContext &Context = F->getContext();
6547 llvm::NamedMDNode *OpenCLMetadata =
6548 M.getModule().getOrInsertNamedMetadata(
6549 "opencl.kernel_wg_size_info");
6551 SmallVector<llvm::Metadata *, 5> Operands;
6552 Operands.push_back(llvm::ConstantAsMetadata::get(F));
6554 Operands.push_back(
6555 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
6556 M.Int32Ty, llvm::APInt(32, Attr->getXDim()))));
6557 Operands.push_back(
6558 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
6559 M.Int32Ty, llvm::APInt(32, Attr->getYDim()))));
6560 Operands.push_back(
6561 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
6562 M.Int32Ty, llvm::APInt(32, Attr->getZDim()))));
6564 // Add a boolean constant operand for "required" (true) or "hint"
6565 // (false) for implementing the work_group_size_hint attr later.
6566 // Currently always true as the hint is not yet implemented.
6567 Operands.push_back(
6568 llvm::ConstantAsMetadata::get(llvm::ConstantInt::getTrue(Context)));
6569 OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
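// e.g. (assumed OpenCL input):
//
//   __attribute__((reqd_work_group_size(8, 4, 1)))
//   __kernel void k(void) { }
//
// yields the operand list {@k, i32 8, i32 4, i32 1, i1 true} under
// opencl.kernel_wg_size_info.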
6577 //===----------------------------------------------------------------------===//
6578 // Hexagon ABI Implementation
6579 //===----------------------------------------------------------------------===//
6583 class HexagonABIInfo : public ABIInfo {
6587 HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
6591 ABIArgInfo classifyReturnType(QualType RetTy) const;
6592 ABIArgInfo classifyArgumentType(QualType RetTy) const;
6594 void computeInfo(CGFunctionInfo &FI) const override;
6596 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6597 QualType Ty) const override;
6600 class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
6602 HexagonTargetCodeGenInfo(CodeGenTypes &CGT)
6603 :TargetCodeGenInfo(new HexagonABIInfo(CGT)) {}
6605 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
6606 return 29;
6607 }
6612 void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
6613 if (!getCXXABI().classifyReturnType(FI))
6614 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
6615 for (auto &I : FI.arguments())
6616 I.info = classifyArgumentType(I.type);
6619 ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const {
6620 if (!isAggregateTypeForABI(Ty)) {
6621 // Treat an enum type as its underlying type.
6622 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
6623 Ty = EnumTy->getDecl()->getIntegerType();
6625 return (Ty->isPromotableIntegerType() ?
6626 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
6629 // Ignore empty records.
6630 if (isEmptyRecord(getContext(), Ty, true))
6631 return ABIArgInfo::getIgnore();
6633 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
6634 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
6636 uint64_t Size = getContext().getTypeSize(Ty);
6637 if (Size > 64)
6638 return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
6639 // Pass in the smallest viable integer type.
6640 else if (Size > 32)
6641 return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
6642 else if (Size > 16)
6643 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
6644 else if (Size > 8)
6645 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
6646 else
6647 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
6650 ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
6651 if (RetTy->isVoidType())
6652 return ABIArgInfo::getIgnore();
6654 // Large vector types should be returned via memory.
6655 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64)
6656 return getNaturalAlignIndirect(RetTy);
6658 if (!isAggregateTypeForABI(RetTy)) {
6659 // Treat an enum type as its underlying type.
6660 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
6661 RetTy = EnumTy->getDecl()->getIntegerType();
6663 return (RetTy->isPromotableIntegerType() ?
6664 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
6667 if (isEmptyRecord(getContext(), RetTy, true))
6668 return ABIArgInfo::getIgnore();
6670 // Aggregates <= 8 bytes are returned in r0; other aggregates
6671 // are returned indirectly.
6672 uint64_t Size = getContext().getTypeSize(RetTy);
6673 if (Size <= 64) {
6674 // Return in the smallest viable integer type.
6675 if (Size <= 8)
6676 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
6677 if (Size <= 16)
6678 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
6679 if (Size <= 32)
6680 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
6681 return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
6682 }
6684 return getNaturalAlignIndirect(RetTy, /*ByVal=*/true);
6687 Address HexagonABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6688 QualType Ty) const {
6689 // FIXME: Someone needs to audit that this handles alignment correctly.
6690 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
6691 getContext().getTypeInfoInChars(Ty),
6692 CharUnits::fromQuantity(4),
6693 /*AllowHigherAlign*/ true);
6696 //===----------------------------------------------------------------------===//
6697 // Lanai ABI Implementation
6698 //===----------------------------------------------------------------------===//
6701 class LanaiABIInfo : public DefaultABIInfo {
6703 LanaiABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
6705 bool shouldUseInReg(QualType Ty, CCState &State) const;
6707 void computeInfo(CGFunctionInfo &FI) const override {
6708 CCState State(FI.getCallingConvention());
6709 // Lanai uses 4 registers to pass arguments unless the function has the
6710 // regparm attribute set.
6711 if (FI.getHasRegParm()) {
6712 State.FreeRegs = FI.getRegParm();
6713 } else {
6714 State.FreeRegs = 4;
6715 }
6717 if (!getCXXABI().classifyReturnType(FI))
6718 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
6719 for (auto &I : FI.arguments())
6720 I.info = classifyArgumentType(I.type, State);
6723 ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;
6724 ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const;
6726 } // end anonymous namespace
6728 bool LanaiABIInfo::shouldUseInReg(QualType Ty, CCState &State) const {
6729 unsigned Size = getContext().getTypeSize(Ty);
6730 unsigned SizeInRegs = llvm::alignTo(Size, 32U) / 32U;
6732 if (SizeInRegs == 0)
6733 return false;
6735 if (SizeInRegs > State.FreeRegs) {
6736 State.FreeRegs = 0;
6737 return false;
6738 }
6740 State.FreeRegs -= SizeInRegs;
6741 return true;
6742 }
6745 ABIArgInfo LanaiABIInfo::getIndirectResult(QualType Ty, bool ByVal,
6746 CCState &State) const {
6747 if (!ByVal) {
6748 if (State.FreeRegs) {
6749 --State.FreeRegs; // Non-byval indirects just use one pointer.
6750 return getNaturalAlignIndirectInReg(Ty);
6752 return getNaturalAlignIndirect(Ty, false);
6755 // Compute the byval alignment.
6756 const unsigned MinABIStackAlignInBytes = 4;
6757 unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
6758 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true,
6759 /*Realign=*/TypeAlign >
6760 MinABIStackAlignInBytes);
6763 ABIArgInfo LanaiABIInfo::classifyArgumentType(QualType Ty,
6764 CCState &State) const {
6765 // Check with the C++ ABI first.
6766 const RecordType *RT = Ty->getAs<RecordType>();
6767 if (RT) {
6768 CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
6769 if (RAA == CGCXXABI::RAA_Indirect) {
6770 return getIndirectResult(Ty, /*ByVal=*/false, State);
6771 } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
6772 return getNaturalAlignIndirect(Ty, /*ByRef=*/true);
6776 if (isAggregateTypeForABI(Ty)) {
6777 // Structures with flexible arrays are always indirect.
6778 if (RT && RT->getDecl()->hasFlexibleArrayMember())
6779 return getIndirectResult(Ty, /*ByVal=*/true, State);
6781 // Ignore empty structs/unions.
6782 if (isEmptyRecord(getContext(), Ty, true))
6783 return ABIArgInfo::getIgnore();
6785 llvm::LLVMContext &LLVMContext = getVMContext();
6786 unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
6787 if (SizeInRegs <= State.FreeRegs) {
6788 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
6789 SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32);
6790 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
6791 State.FreeRegs -= SizeInRegs;
6792 return ABIArgInfo::getDirectInReg(Result);
6793 } else {
6794 State.FreeRegs = 0;
6795 }
6796 return getIndirectResult(Ty, true, State);
6799 // Treat an enum type as its underlying type.
6800 if (const auto *EnumTy = Ty->getAs<EnumType>())
6801 Ty = EnumTy->getDecl()->getIntegerType();
6803 bool InReg = shouldUseInReg(Ty, State);
6804 if (Ty->isPromotableIntegerType()) {
6806 return ABIArgInfo::getDirectInReg();
6807 return ABIArgInfo::getExtend();
6810 return ABIArgInfo::getDirectInReg();
6811 return ABIArgInfo::getDirect();
namespace {
class LanaiTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  LanaiTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(new LanaiABIInfo(CGT)) {}
};
} // end anonymous namespace

//===----------------------------------------------------------------------===//
// AMDGPU ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
  unsigned getOpenCLKernelCallingConv() const override;
};

} // end anonymous namespace

void AMDGPUTargetCodeGenInfo::setTargetAttributes(
    const Decl *D,
    llvm::GlobalValue *GV,
    CodeGen::CodeGenModule &M) const {
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
  if (!FD)
    return;

  if (const auto Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) {
    llvm::Function *F = cast<llvm::Function>(GV);
    uint32_t NumVGPR = Attr->getNumVGPR();
    if (NumVGPR != 0)
      F->addFnAttr("amdgpu_num_vgpr", llvm::utostr(NumVGPR));
  }

  if (const auto Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) {
    llvm::Function *F = cast<llvm::Function>(GV);
    unsigned NumSGPR = Attr->getNumSGPR();
    if (NumSGPR != 0)
      F->addFnAttr("amdgpu_num_sgpr", llvm::utostr(NumSGPR));
  }
}

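// Illustrative sketch (not from the original source): the attributes handled
// above are typically written on OpenCL kernels, e.g.
//
//   __attribute__((amdgpu_num_vgpr(64)))
//   __attribute__((amdgpu_num_sgpr(32)))
//   kernel void f() {}
//
// which lowers to the IR function attributes "amdgpu_num_vgpr"="64" and
// "amdgpu_num_sgpr"="32". The exact source-level attribute spellings are an
// assumption here; see clang's AMDGPU attribute definitions.
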
unsigned AMDGPUTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
  return llvm::CallingConv::AMDGPU_KERNEL;
}

//===----------------------------------------------------------------------===//
// SPARC v8 ABI Implementation.
// Based on the SPARC Compliance Definition version 2.4.1.
//
// Ensures that complex values are passed in registers.
//
namespace {
class SparcV8ABIInfo : public DefaultABIInfo {
public:
  SparcV8ABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}

private:
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  void computeInfo(CGFunctionInfo &FI) const override;
};
} // end anonymous namespace

ABIArgInfo
SparcV8ABIInfo::classifyReturnType(QualType Ty) const {
  if (Ty->isAnyComplexType())
    return ABIArgInfo::getDirect();

  return DefaultABIInfo::classifyReturnType(Ty);
}

void SparcV8ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  for (auto &Arg : FI.arguments())
    Arg.info = classifyArgumentType(Arg.type);
}

namespace {
class SparcV8TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SparcV8TargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(new SparcV8ABIInfo(CGT)) {}
};
} // end anonymous namespace

//===----------------------------------------------------------------------===//
// SPARC v9 ABI Implementation.
// Based on the SPARC Compliance Definition version 2.4.1.
//
// Function arguments are mapped to a nominal "parameter array" and promoted to
// registers depending on their type. Each argument occupies 8 or 16 bytes in
// the array; structs larger than 16 bytes are passed indirectly.
//
// One case requires special care:
//
//   struct mixed {
//     int i;
//     float f;
//   };
//
// When a struct mixed is passed by value, it only occupies 8 bytes in the
// parameter array, but the int is passed in an integer register, and the float
// is passed in a floating point register. This is represented as two arguments
// with the LLVM IR inreg attribute:
//
//   declare void f(i32 inreg %i, float inreg %f)
//
// The code generator will only allocate 4 bytes from the parameter array for
// the inreg arguments. All other arguments are allocated a multiple of 8
// bytes.
//
namespace {
class SparcV9ABIInfo : public ABIInfo {
public:
  SparcV9ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

private:
  ABIArgInfo classifyType(QualType RetTy, unsigned SizeLimit) const;
  void computeInfo(CGFunctionInfo &FI) const override;
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  // Coercion type builder for structs passed in registers. The coercion type
  // serves two purposes:
  //
  // 1. Pad structs to a multiple of 64 bits, so they are passed 'left-aligned'
  //    in registers.
  // 2. Expose aligned floating point elements as first-level elements, so the
  //    code generator knows to pass them in floating point registers.
  //
  // We also compute the InReg flag which indicates that the struct contains
  // aligned 32-bit floats.
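  //
  // Illustrative sketch (not from the original comments): given
  //
  //   struct S { int a; float f; int b; };  // LLVM type { i32, float, i32 }
  //
  // the builder would produce the coercion type { i32, float, i64 }: the
  // aligned float is exposed as a first-level element and the trailing word
  // is padded out to 64 bits, with InReg set because the struct contains an
  // aligned 32-bit float.
  //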
  struct CoerceBuilder {
    llvm::LLVMContext &Context;
    const llvm::DataLayout &DL;
    SmallVector<llvm::Type*, 8> Elems;
    uint64_t Size;
    bool InReg;

    CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl)
      : Context(c), DL(dl), Size(0), InReg(false) {}

    // Pad Elems with integers until Size is ToSize.
    void pad(uint64_t ToSize) {
      assert(ToSize >= Size && "Cannot remove elements");
      if (ToSize == Size)
        return;

      // Finish the current 64-bit word.
      uint64_t Aligned = llvm::alignTo(Size, 64);
      if (Aligned > Size && Aligned <= ToSize) {
        Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size));
        Size = Aligned;
      }

      // Add whole 64-bit words.
      while (Size + 64 <= ToSize) {
        Elems.push_back(llvm::Type::getInt64Ty(Context));
        Size += 64;
      }

      // Final in-word padding.
      if (Size < ToSize) {
        Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size));
        Size = ToSize;
      }
    }

    // Add a floating point element at Offset.
    void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) {
      // Unaligned floats are treated as integers.
      if (Offset % Bits)
        return;
      // The InReg flag is only required if there are any floats < 64 bits.
      if (Bits < 64)
        InReg = true;
      pad(Offset);
      Elems.push_back(Ty);
      Size = Offset + Bits;
    }

    // Add a struct type to the coercion type, starting at Offset (in bits).
    void addStruct(uint64_t Offset, llvm::StructType *StrTy) {
      const llvm::StructLayout *Layout = DL.getStructLayout(StrTy);
      for (unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) {
        llvm::Type *ElemTy = StrTy->getElementType(i);
        uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i);
        switch (ElemTy->getTypeID()) {
        case llvm::Type::StructTyID:
          addStruct(ElemOffset, cast<llvm::StructType>(ElemTy));
          break;
        case llvm::Type::FloatTyID:
          addFloat(ElemOffset, ElemTy, 32);
          break;
        case llvm::Type::DoubleTyID:
          addFloat(ElemOffset, ElemTy, 64);
          break;
        case llvm::Type::FP128TyID:
          addFloat(ElemOffset, ElemTy, 128);
          break;
        case llvm::Type::PointerTyID:
          if (ElemOffset % 64 == 0) {
            pad(ElemOffset);
            Elems.push_back(ElemTy);
            Size = ElemOffset + 64;
          }
          break;
        default:
          break;
        }
      }
    }

    // Check if Ty is a usable substitute for the coercion type.
    bool isUsableType(llvm::StructType *Ty) const {
      return llvm::makeArrayRef(Elems) == Ty->elements();
    }

    // Get the coercion type as a literal struct type.
    llvm::Type *getType() const {
      if (Elems.size() == 1)
        return Elems.front();
      return llvm::StructType::get(Context, Elems);
    }
  };
};
} // end anonymous namespace

ABIArgInfo
SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
  if (Ty->isVoidType())
    return ABIArgInfo::getIgnore();

  uint64_t Size = getContext().getTypeSize(Ty);

  // Anything too big to fit in registers is passed with an explicit indirect
  // pointer / sret pointer.
  if (Size > SizeLimit)
    return getNaturalAlignIndirect(Ty, /*ByVal=*/false);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // Integer types smaller than a register are extended.
  if (Size < 64 && Ty->isIntegerType())
    return ABIArgInfo::getExtend();

  // Other non-aggregates go in registers.
  if (!isAggregateTypeForABI(Ty))
    return ABIArgInfo::getDirect();

  // If a C++ object has either a non-trivial copy constructor or a non-trivial
  // destructor, it is passed with an explicit indirect pointer / sret pointer.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
    return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

  // This is a small aggregate type that should be passed in registers.
  // Build a coercion type from the LLVM struct type.
  llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty));
  if (!StrTy)
    return ABIArgInfo::getDirect();

  CoerceBuilder CB(getVMContext(), getDataLayout());
  CB.addStruct(0, StrTy);
  CB.pad(llvm::alignTo(CB.DL.getTypeSizeInBits(StrTy), 64));

  // Try to use the original type for coercion.
  llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType();

  if (CB.InReg)
    return ABIArgInfo::getDirectInReg(CoerceTy);
  return ABIArgInfo::getDirect(CoerceTy);
}

Address SparcV9ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                  QualType Ty) const {
  ABIArgInfo AI = classifyType(Ty, 16 * 8);
  llvm::Type *ArgTy = CGT.ConvertType(Ty);
  if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
    AI.setCoerceToType(ArgTy);

  CharUnits SlotSize = CharUnits::fromQuantity(8);

  CGBuilderTy &Builder = CGF.Builder;
  Address Addr(Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize);
  llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);

  auto TypeInfo = getContext().getTypeInfoInChars(Ty);

  Address ArgAddr = Address::invalid();
  CharUnits Stride;
  switch (AI.getKind()) {
  case ABIArgInfo::Expand:
  case ABIArgInfo::CoerceAndExpand:
  case ABIArgInfo::InAlloca:
    llvm_unreachable("Unsupported ABI kind for va_arg");

  case ABIArgInfo::Extend: {
    Stride = SlotSize;
    CharUnits Offset = SlotSize - TypeInfo.first;
    ArgAddr = Builder.CreateConstInBoundsByteGEP(Addr, Offset, "extend");
    break;
  }

  case ABIArgInfo::Direct: {
    auto AllocSize = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
    Stride = CharUnits::fromQuantity(AllocSize).alignTo(SlotSize);
    ArgAddr = Addr;
    break;
  }

  case ABIArgInfo::Indirect:
    Stride = SlotSize;
    ArgAddr = Builder.CreateElementBitCast(Addr, ArgPtrTy, "indirect");
    ArgAddr = Address(Builder.CreateLoad(ArgAddr, "indirect.arg"),
                      TypeInfo.second);
    break;

  case ABIArgInfo::Ignore:
    return Address(llvm::UndefValue::get(ArgPtrTy), TypeInfo.second);
  }

  // Update VAList.
  llvm::Value *NextPtr =
      Builder.CreateConstInBoundsByteGEP(Addr.getPointer(), Stride, "ap.next");
  Builder.CreateStore(NextPtr, VAListAddr);

  return Builder.CreateBitCast(ArgAddr, ArgPtrTy, "arg.addr");
}

void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8);
  for (auto &I : FI.arguments())
    I.info = classifyType(I.type, 16 * 8);
}

namespace {
class SparcV9TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SparcV9TargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(new SparcV9ABIInfo(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 14;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;
};
} // end anonymous namespace

bool
SparcV9TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                                  llvm::Value *Address) const {
  // This is calculated from the LLVM and GCC tables and verified
  // against gcc output. AFAIK all ABIs use the same encoding.

  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  llvm::IntegerType *i8 = CGF.Int8Ty;
  llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
  llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);

  // 0-31: the 8-byte general-purpose registers
  AssignToArrayRange(Builder, Address, Eight8, 0, 31);

  // 32-63: f0-31, the 4-byte floating-point registers
  AssignToArrayRange(Builder, Address, Four8, 32, 63);

  // 64-71: the 8-byte special registers (y, psr, wim, tbr, pc, npc, fsr, csr)
  AssignToArrayRange(Builder, Address, Eight8, 64, 71);

  // 72-87: d0-15, the 8-byte floating-point registers
  AssignToArrayRange(Builder, Address, Eight8, 72, 87);

  return false;
}

//===----------------------------------------------------------------------===//
// XCore ABI Implementation
//===----------------------------------------------------------------------===//

namespace {

/// A SmallStringEnc instance is used to build up the TypeString by passing
/// it by reference between functions that append to it.
typedef llvm::SmallString<128> SmallStringEnc;

/// TypeStringCache caches the meta encodings of types.
///
/// The reason for caching TypeStrings is twofold:
/// 1. To cache a type's encoding for later uses;
/// 2. As a means to break recursive member type inclusion.
///
/// A cache Entry can have a Status of:
/// NonRecursive:   The type encoding is not recursive;
/// Recursive:      The type encoding is recursive;
/// Incomplete:     An incomplete TypeString;
/// IncompleteUsed: An incomplete TypeString that has been used in a
///                 Recursive type encoding.
///
/// A NonRecursive entry will have all of its sub-members expanded as fully
/// as possible. Whilst it may contain types which are recursive, the type
/// itself is not recursive and thus its encoding may be safely used whenever
/// the type is encountered.
///
/// A Recursive entry will have all of its sub-members expanded as fully as
/// possible. The type itself is recursive and it may contain other types which
/// are recursive. The Recursive encoding must not be used during the expansion
/// of a recursive type's recursive branch. For simplicity the code uses
/// IncompleteCount to reject all usage of Recursive encodings for member types.
///
/// An Incomplete entry is always a RecordType and only encodes its
/// identifier, e.g. "s(S){}". Incomplete 'StubEnc' entries are ephemeral and
/// are placed into the cache during type expansion as a means to identify and
/// handle recursive inclusion of types as sub-members. If there is recursion
/// the entry becomes IncompleteUsed.
///
/// During the expansion of a RecordType's members:
///
/// If the cache contains a NonRecursive encoding for the member type, the
/// cached encoding is used;
///
/// If the cache contains a Recursive encoding for the member type, the
/// cached encoding is 'Swapped' out, as it may be incorrect, and...
///
/// If the member is a RecordType, an Incomplete encoding is placed into the
/// cache to break potential recursive inclusion of itself as a sub-member;
///
/// Once a member RecordType has been expanded, its temporary incomplete
/// entry is removed from the cache. If a Recursive encoding was swapped out
/// it is swapped back in;
///
/// If an incomplete entry is used to expand a sub-member, the incomplete
/// entry is marked as IncompleteUsed. The cache keeps count of how many
/// IncompleteUsed entries it currently contains in IncompleteUsedCount;
///
/// If a member's encoding is found to be NonRecursive or Recursive (i.e.
/// IncompleteUsedCount==0), the member's encoding is added to the cache.
/// Otherwise the member is part of a recursive type, and the recursion has
/// been exited too soon for the encoding to be correct for the member.
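///
/// Illustrative sketch (not part of the original documentation): for a
/// self-referential struct such as
///
///   struct S { struct S *next; };
///
/// an Incomplete stub "s(S){}" is first cached; expanding the 'next' member
/// then finds and uses that stub, producing a Recursive encoding along the
/// lines of "s(S){m(next){p(s(S){})}}".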
class TypeStringCache {
  enum Status {NonRecursive, Recursive, Incomplete, IncompleteUsed};
  struct Entry {
    std::string Str;     // The encoded TypeString for the type.
    enum Status State;   // Information about the encoding in 'Str'.
    std::string Swapped; // A temporary place holder for a Recursive encoding
                         // during the expansion of RecordType's members.
  };
  std::map<const IdentifierInfo *, struct Entry> Map;
  unsigned IncompleteCount;     // Number of Incomplete entries in the Map.
  unsigned IncompleteUsedCount; // Number of IncompleteUsed entries in the Map.

public:
  TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {}
  void addIncomplete(const IdentifierInfo *ID, std::string StubEnc);
  bool removeIncomplete(const IdentifierInfo *ID);
  void addIfComplete(const IdentifierInfo *ID, StringRef Str,
                     bool IsRecursive);
  StringRef lookupStr(const IdentifierInfo *ID);
};

/// TypeString encodings for enum & union fields must be ordered.
/// FieldEncoding is a helper for this ordering process.
class FieldEncoding {
  bool HasName;
  std::string Enc;

public:
  FieldEncoding(bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {}
  StringRef str() { return Enc.c_str(); }
  bool operator<(const FieldEncoding &rhs) const {
    if (HasName != rhs.HasName) return HasName;
    return Enc < rhs.Enc;
  }
};

class XCoreABIInfo : public DefaultABIInfo {
public:
  XCoreABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
};

class XCoreTargetCodeGenInfo : public TargetCodeGenInfo {
  mutable TypeStringCache TSC;
public:
  XCoreTargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(new XCoreABIInfo(CGT)) {}
  void emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
                    CodeGen::CodeGenModule &M) const override;
};

} // End anonymous namespace.

// TODO: this implementation is likely now redundant with the default
// EmitVAArg.
Address XCoreABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty) const {
  CGBuilderTy &Builder = CGF.Builder;

  // Get the VAList.
  CharUnits SlotSize = CharUnits::fromQuantity(4);
  Address AP(Builder.CreateLoad(VAListAddr), SlotSize);

  // Handle the argument.
  ABIArgInfo AI = classifyArgumentType(Ty);
  CharUnits TypeAlign = getContext().getTypeAlignInChars(Ty);
  llvm::Type *ArgTy = CGT.ConvertType(Ty);
  if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
    AI.setCoerceToType(ArgTy);
  llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);

  Address Val = Address::invalid();
  CharUnits ArgSize = CharUnits::Zero();
  switch (AI.getKind()) {
  case ABIArgInfo::Expand:
  case ABIArgInfo::CoerceAndExpand:
  case ABIArgInfo::InAlloca:
    llvm_unreachable("Unsupported ABI kind for va_arg");
  case ABIArgInfo::Ignore:
    Val = Address(llvm::UndefValue::get(ArgPtrTy), TypeAlign);
    ArgSize = CharUnits::Zero();
    break;
  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    Val = Builder.CreateBitCast(AP, ArgPtrTy);
    ArgSize = CharUnits::fromQuantity(
        getDataLayout().getTypeAllocSize(AI.getCoerceToType()));
    ArgSize = ArgSize.alignTo(SlotSize);
    break;
  case ABIArgInfo::Indirect:
    Val = Builder.CreateElementBitCast(AP, ArgPtrTy);
    Val = Address(Builder.CreateLoad(Val), TypeAlign);
    ArgSize = SlotSize;
    break;
  }

  // Increment the VAList.
  if (!ArgSize.isZero()) {
    llvm::Value *APN =
        Builder.CreateConstInBoundsByteGEP(AP.getPointer(), ArgSize);
    Builder.CreateStore(APN, VAListAddr);
  }

  return Val;
}

/// During the expansion of a RecordType, an incomplete TypeString is placed
/// into the cache as a means to identify and break recursion.
/// If there is a Recursive encoding in the cache, it is swapped out and will
/// be reinserted by removeIncomplete().
/// All other types of encoding should have been used rather than arriving here.
void TypeStringCache::addIncomplete(const IdentifierInfo *ID,
                                    std::string StubEnc) {
  if (!ID)
    return;
  Entry &E = Map[ID];
  assert((E.Str.empty() || E.State == Recursive) &&
         "Incorrect use of addIncomplete");
  assert(!StubEnc.empty() && "Passing an empty string to addIncomplete()");
  E.Swapped.swap(E.Str); // Swap out the Recursive encoding.
  E.Str.swap(StubEnc);
  E.State = Incomplete;
  ++IncompleteCount;
}

/// Once the RecordType has been expanded, the temporary incomplete TypeString
/// must be removed from the cache.
/// If a Recursive was swapped out by addIncomplete(), it will be replaced.
/// Returns true if the RecordType was defined recursively.
bool TypeStringCache::removeIncomplete(const IdentifierInfo *ID) {
  if (!ID)
    return false;
  auto I = Map.find(ID);
  assert(I != Map.end() && "Entry not present");
  Entry &E = I->second;
  assert((E.State == Incomplete ||
          E.State == IncompleteUsed) &&
         "Entry must be an incomplete type");
  bool IsRecursive = false;
  if (E.State == IncompleteUsed) {
    // We made use of our Incomplete encoding, thus we are recursive.
    IsRecursive = true;
    --IncompleteUsedCount;
  }
  if (E.Swapped.empty())
    Map.erase(I);
  else {
    // Swap the Recursive encoding back in.
    E.Swapped.swap(E.Str);
    E.Swapped.clear();
    E.State = Recursive;
  }
  --IncompleteCount;
  return IsRecursive;
}

/// Add the encoded TypeString to the cache only if it is NonRecursive or
/// Recursive (viz: all sub-members were expanded as fully as possible).
void TypeStringCache::addIfComplete(const IdentifierInfo *ID, StringRef Str,
                                    bool IsRecursive) {
  if (!ID || IncompleteUsedCount)
    return; // No key or it is an incomplete sub-type so don't add.
  Entry &E = Map[ID];
  if (IsRecursive && !E.Str.empty()) {
    assert(E.State == Recursive && E.Str.size() == Str.size() &&
           "This is not the same Recursive entry");
    // The parent container was not recursive after all, so we could have used
    // this Recursive sub-member entry after all, but we assumed the worst when
    // we started viz: IncompleteCount!=0.
    return;
  }
  assert(E.Str.empty() && "Entry already present");
  E.Str = Str.str();
  E.State = IsRecursive ? Recursive : NonRecursive;
}

/// Return a cached TypeString encoding for the ID. If there isn't one, or we
/// are recursively expanding a type (IncompleteCount != 0) and the cached
/// encoding is Recursive, return an empty StringRef.
StringRef TypeStringCache::lookupStr(const IdentifierInfo *ID) {
  if (!ID)
    return StringRef(); // We have no key.
  auto I = Map.find(ID);
  if (I == Map.end())
    return StringRef(); // We have no encoding.
  Entry &E = I->second;
  if (E.State == Recursive && IncompleteCount)
    return StringRef(); // We don't use Recursive encodings for member types.

  if (E.State == Incomplete) {
    // The incomplete type is being used to break out of recursion.
    E.State = IncompleteUsed;
    ++IncompleteUsedCount;
  }
  return E.Str.c_str();
}

/// The XCore ABI includes a type information section that communicates symbol
/// type information to the linker. The linker uses this information to verify
/// safety/correctness of things such as array bounds and pointers et al.
/// The ABI only requires C (and XC) language modules to emit TypeStrings.
/// This type information (TypeString) is emitted into metadata for all global
/// symbols: definitions, declarations, functions & variables.
///
/// The TypeString carries type, qualifier, name, size & value details.
/// Please see 'Tools Development Guide' section 2.16.2 for format details:
/// https://www.xmos.com/download/public/Tools-Development-Guide%28X9114A%29.pdf
/// The output is tested by test/CodeGen/xcore-stringtype.c.
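///
/// Illustrative sketch (not from the original comments): for a C global such
/// as 'const int g;', the emitted encoding would be "c:si", recorded roughly
/// as
///
///   !xcore.typestrings = !{!0}
///   !0 = !{i32* @g, !"c:si"}
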
static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
                          CodeGen::CodeGenModule &CGM, TypeStringCache &TSC);

/// XCore uses emitTargetMD to emit TypeString metadata for global symbols.
void XCoreTargetCodeGenInfo::emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
                                          CodeGen::CodeGenModule &CGM) const {
  SmallStringEnc Enc;
  if (getTypeString(Enc, D, CGM, TSC)) {
    llvm::LLVMContext &Ctx = CGM.getModule().getContext();
    llvm::Metadata *MDVals[] = {llvm::ConstantAsMetadata::get(GV),
                                llvm::MDString::get(Ctx, Enc.str())};
    llvm::NamedMDNode *MD =
        CGM.getModule().getOrInsertNamedMetadata("xcore.typestrings");
    MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
  }
}

//===----------------------------------------------------------------------===//
// SPIR ABI Implementation
//===----------------------------------------------------------------------===//

namespace {
class SPIRTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SPIRTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
  void emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
                    CodeGen::CodeGenModule &M) const override;
  unsigned getOpenCLKernelCallingConv() const override;
};
} // End anonymous namespace.

/// Emit SPIR specific metadata: OpenCL and SPIR version.
void SPIRTargetCodeGenInfo::emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
                                         CodeGen::CodeGenModule &CGM) const {
  llvm::LLVMContext &Ctx = CGM.getModule().getContext();
  llvm::Type *Int32Ty = llvm::Type::getInt32Ty(Ctx);
  llvm::Module &M = CGM.getModule();
  // SPIR v2.0 s2.12 - The SPIR version used by the module is stored in the
  // opencl.spir.version named metadata.
  llvm::Metadata *SPIRVerElts[] = {
      llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(Int32Ty, 2)),
      llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(Int32Ty, 0))};
  llvm::NamedMDNode *SPIRVerMD =
      M.getOrInsertNamedMetadata("opencl.spir.version");
  SPIRVerMD->addOperand(llvm::MDNode::get(Ctx, SPIRVerElts));
  // SPIR v2.0 s2.13 - The OpenCL version used by the module is stored in the
  // opencl.ocl.version named metadata node.
  llvm::Metadata *OCLVerElts[] = {
      llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
          Int32Ty, CGM.getLangOpts().OpenCLVersion / 100)),
      llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
          Int32Ty, (CGM.getLangOpts().OpenCLVersion % 100) / 10))};
  llvm::NamedMDNode *OCLVerMD =
      M.getOrInsertNamedMetadata("opencl.ocl.version");
  OCLVerMD->addOperand(llvm::MDNode::get(Ctx, OCLVerElts));
}

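// Illustrative sketch (not from the original comments): a module compiled
// with -cl-std=CL1.2 would end up with
//
//   !opencl.spir.version = !{!0}  ; where !0 = !{i32 2, i32 0}
//   !opencl.ocl.version  = !{!1}  ; where !1 = !{i32 1, i32 2}
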
unsigned SPIRTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
  return llvm::CallingConv::SPIR_KERNEL;
}

static bool appendType(SmallStringEnc &Enc, QualType QType,
                       const CodeGen::CodeGenModule &CGM,
                       TypeStringCache &TSC);

/// Helper function for appendRecordType().
/// Builds a SmallVector containing the encoded field types in declaration
/// order.
static bool extractFieldType(SmallVectorImpl<FieldEncoding> &FE,
                             const RecordDecl *RD,
                             const CodeGen::CodeGenModule &CGM,
                             TypeStringCache &TSC) {
  for (const auto *Field : RD->fields()) {
    SmallStringEnc Enc;
    Enc += "m(";
    Enc += Field->getName();
    Enc += "){";
    if (Field->isBitField()) {
      Enc += "b(";
      llvm::raw_svector_ostream OS(Enc);
      OS << Field->getBitWidthValue(CGM.getContext());
      Enc += ':';
    }
    if (!appendType(Enc, Field->getType(), CGM, TSC))
      return false;
    if (Field->isBitField())
      Enc += ')';
    Enc += '}';
    FE.emplace_back(!Field->getName().empty(), Enc);
  }
  return true;
}

/// Appends structure and union types to Enc and adds the encoding to the
/// cache. Recursively calls appendType (via extractFieldType) for each field.
/// Union types have their fields ordered according to the ABI.
static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT,
                             const CodeGen::CodeGenModule &CGM,
                             TypeStringCache &TSC, const IdentifierInfo *ID) {
  // Append the cached TypeString if we have one.
  StringRef TypeString = TSC.lookupStr(ID);
  if (!TypeString.empty()) {
    Enc += TypeString;
    return true;
  }

  // Start to emit an incomplete TypeString.
  size_t Start = Enc.size();
  Enc += (RT->isUnionType()? 'u' : 's');
  Enc += '(';
  if (ID)
    Enc += ID->getName();
  Enc += "){";

  // We collect all encoded fields and order as necessary.
  bool IsRecursive = false;
  const RecordDecl *RD = RT->getDecl()->getDefinition();
  if (RD && !RD->field_empty()) {
    // An incomplete TypeString stub is placed in the cache for this RecordType
    // so that recursive calls to this RecordType will use it whilst building a
    // complete TypeString for this RecordType.
    SmallVector<FieldEncoding, 16> FE;
    std::string StubEnc(Enc.substr(Start).str());
    StubEnc += '}'; // StubEnc now holds a valid incomplete TypeString.
    TSC.addIncomplete(ID, std::move(StubEnc));
    if (!extractFieldType(FE, RD, CGM, TSC)) {
      (void) TSC.removeIncomplete(ID);
      return false;
    }
    IsRecursive = TSC.removeIncomplete(ID);
    // The ABI requires unions to be sorted but not structures.
    // See FieldEncoding::operator< for the sort algorithm.
    if (RT->isUnionType())
      std::sort(FE.begin(), FE.end());
    // We can now complete the TypeString.
    unsigned E = FE.size();
    for (unsigned I = 0; I != E; ++I) {
      if (I)
        Enc += ',';
      Enc += FE[I].str();
    }
  }
  Enc += '}';
  TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive);
  return true;
}

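// Illustrative sketch (not from the original source): the struct
//
//   struct point { int x; int y; };
//
// is encoded as "s(point){m(x){si},m(y){si}}"; a union uses 'u' in place of
// 's' and, as noted above, has its fields sorted.
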
/// Appends enum types to Enc and adds the encoding to the cache.
static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET,
                           TypeStringCache &TSC,
                           const IdentifierInfo *ID) {
  // Append the cached TypeString if we have one.
  StringRef TypeString = TSC.lookupStr(ID);
  if (!TypeString.empty()) {
    Enc += TypeString;
    return true;
  }

  size_t Start = Enc.size();
  Enc += "e(";
  if (ID)
    Enc += ID->getName();
  Enc += "){";

  // We collect all encoded enumerations and order them alphanumerically.
  if (const EnumDecl *ED = ET->getDecl()->getDefinition()) {
    SmallVector<FieldEncoding, 16> FE;
    for (auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E;
         ++I) {
      SmallStringEnc EnumEnc;
      EnumEnc += "m(";
      EnumEnc += I->getName();
      EnumEnc += "){";
      I->getInitVal().toString(EnumEnc);
      EnumEnc += '}';
      FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc));
    }
    std::sort(FE.begin(), FE.end());
    unsigned E = FE.size();
    for (unsigned I = 0; I != E; ++I) {
      if (I)
        Enc += ',';
      Enc += FE[I].str();
    }
  }
  Enc += '}';
  TSC.addIfComplete(ID, Enc.substr(Start), false);
  return true;
}

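// Illustrative sketch (not from the original source): the enum
//
//   enum flag { A = 1, B = 2 };
//
// is encoded as "e(flag){m(A){1},m(B){2}}", with the enumerators ordered
// alphanumerically.
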
/// Appends the type's qualifier to Enc.
/// This is done prior to appending the type's encoding.
static void appendQualifier(SmallStringEnc &Enc, QualType QT) {
  // Qualifiers are emitted in alphabetical order.
  static const char *const Table[]={"","c:","r:","cr:","v:","cv:","rv:","crv:"};
  int Lookup = 0;
  if (QT.isConstQualified())
    Lookup += 1<<0;
  if (QT.isRestrictQualified())
    Lookup += 1<<1;
  if (QT.isVolatileQualified())
    Lookup += 1<<2;
  Enc += Table[Lookup];
}

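// For example (a sketch based on the table above): 'const volatile int' is
// prefixed as "cv:" (yielding "cv:si"), while an unqualified type gets no
// prefix at all.
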
/// Appends built-in types to Enc.
static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT) {
  const char *EncType;
  switch (BT->getKind()) {
    case BuiltinType::Void:       EncType = "0";   break;
    case BuiltinType::Bool:       EncType = "b";   break;
    case BuiltinType::Char_U:     EncType = "uc";  break;
    case BuiltinType::UChar:      EncType = "uc";  break;
    case BuiltinType::SChar:      EncType = "sc";  break;
    case BuiltinType::UShort:     EncType = "us";  break;
    case BuiltinType::Short:      EncType = "ss";  break;
    case BuiltinType::UInt:       EncType = "ui";  break;
    case BuiltinType::Int:        EncType = "si";  break;
    case BuiltinType::ULong:      EncType = "ul";  break;
    case BuiltinType::Long:       EncType = "sl";  break;
    case BuiltinType::ULongLong:  EncType = "ull"; break;
    case BuiltinType::LongLong:   EncType = "sll"; break;
    case BuiltinType::Float:      EncType = "ft";  break;
    case BuiltinType::Double:     EncType = "d";   break;
    case BuiltinType::LongDouble: EncType = "ld";  break;
    default:
      return false;
  }
  Enc += EncType;
  return true;
}

/// Appends a pointer encoding to Enc before calling appendType for the pointee.
static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT,
                              const CodeGen::CodeGenModule &CGM,
                              TypeStringCache &TSC) {
  Enc += "p(";
  if (!appendType(Enc, PT->getPointeeType(), CGM, TSC))
    return false;
  Enc += ')';
  return true;
}

/// Appends an array encoding to Enc before calling appendType for the element.
static bool appendArrayType(SmallStringEnc &Enc, QualType QT,
                            const ArrayType *AT,
                            const CodeGen::CodeGenModule &CGM,
                            TypeStringCache &TSC, StringRef NoSizeEnc) {
  if (AT->getSizeModifier() != ArrayType::Normal)
    return false;
  Enc += "a(";
  if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT))
    CAT->getSize().toStringUnsigned(Enc);
  else
    Enc += NoSizeEnc; // Global arrays use "*", otherwise it is "".
  Enc += ':';
  // The Qualifiers should be attached to the type rather than the array.
  appendQualifier(Enc, QT);
  if (!appendType(Enc, AT->getElementType(), CGM, TSC))
    return false;
  Enc += ')';
  return true;
}

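// Illustrative sketch (not from the original source): 'int a[10]' encodes as
// "a(10:si)"; a global array of unknown size uses '*' for the size, e.g.
// "a(*:si)".
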
/// Appends a function encoding to Enc, calling appendType for the return type
/// and the arguments.
static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT,
                               const CodeGen::CodeGenModule &CGM,
                               TypeStringCache &TSC) {
  Enc += "f{";
  if (!appendType(Enc, FT->getReturnType(), CGM, TSC))
    return false;
  Enc += "}(";
  if (const FunctionProtoType *FPT = FT->getAs<FunctionProtoType>()) {
    // N.B. we are only interested in the adjusted param types.
    auto I = FPT->param_type_begin();
    auto E = FPT->param_type_end();
    if (I != E) {
      do {
        if (!appendType(Enc, *I, CGM, TSC))
          return false;
        ++I;
        if (I != E)
          Enc += ',';
      } while (I != E);
      if (FPT->isVariadic())
        Enc += ",va";
    } else {
      if (FPT->isVariadic())
        Enc += "va";
      else
        Enc += "0";
    }
  }
  Enc += ')';
  return true;
}

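// Illustrative sketch (not from the original source): 'int f(float)' encodes
// as "f{si}(ft)", 'int f(void)' as "f{si}(0)", and the variadic
// 'int f(float, ...)' as "f{si}(ft,va)".
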
/// Handles the type's qualifier before dispatching a call to handle specific
/// type encodings.
static bool appendType(SmallStringEnc &Enc, QualType QType,
                       const CodeGen::CodeGenModule &CGM,
                       TypeStringCache &TSC) {

  QualType QT = QType.getCanonicalType();

  if (const ArrayType *AT = QT->getAsArrayTypeUnsafe())
    // The Qualifiers should be attached to the type rather than the array.
    // Thus we don't call appendQualifier() here.
    return appendArrayType(Enc, QT, AT, CGM, TSC, "");

  appendQualifier(Enc, QT);

  if (const BuiltinType *BT = QT->getAs<BuiltinType>())
    return appendBuiltinType(Enc, BT);

  if (const PointerType *PT = QT->getAs<PointerType>())
    return appendPointerType(Enc, PT, CGM, TSC);

  if (const EnumType *ET = QT->getAs<EnumType>())
    return appendEnumType(Enc, ET, TSC, QT.getBaseTypeIdentifier());

  if (const RecordType *RT = QT->getAsStructureType())
    return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());

  if (const RecordType *RT = QT->getAsUnionType())
    return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());

  if (const FunctionType *FT = QT->getAs<FunctionType>())
    return appendFunctionType(Enc, FT, CGM, TSC);

  return false;
}

static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
                          CodeGen::CodeGenModule &CGM, TypeStringCache &TSC) {
  if (!D)
    return false;

  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->getLanguageLinkage() != CLanguageLinkage)
      return false;
    return appendType(Enc, FD->getType(), CGM, TSC);
  }

  if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
    if (VD->getLanguageLinkage() != CLanguageLinkage)
      return false;
    QualType QT = VD->getType().getCanonicalType();
    if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) {
      // Global ArrayTypes are given a size of '*' if the size is unknown.
      // The Qualifiers should be attached to the type rather than the array.
      // Thus we don't call appendQualifier() here.
      return appendArrayType(Enc, QT, AT, CGM, TSC, "*");
    }
    return appendType(Enc, QT, CGM, TSC);
  }
  return false;
}

//===----------------------------------------------------------------------===//
// Driver code
//===----------------------------------------------------------------------===//

const llvm::Triple &CodeGenModule::getTriple() const {
  return getTarget().getTriple();
}

bool CodeGenModule::supportsCOMDAT() const {
  return getTriple().supportsCOMDAT();
}

const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
  if (TheTargetCodeGenInfo)
    return *TheTargetCodeGenInfo;

  // Helper to set the unique_ptr while still keeping the return value.
  auto SetCGInfo = [&](TargetCodeGenInfo *P) -> const TargetCodeGenInfo & {
    this->TheTargetCodeGenInfo.reset(P);
    return *P;
  };

  const llvm::Triple &Triple = getTarget().getTriple();
  switch (Triple.getArch()) {
  default:
    return SetCGInfo(new DefaultTargetCodeGenInfo(Types));

  case llvm::Triple::le32:
    return SetCGInfo(new PNaClTargetCodeGenInfo(Types));
  case llvm::Triple::mips:
  case llvm::Triple::mipsel:
    if (Triple.getOS() == llvm::Triple::NaCl)
      return SetCGInfo(new PNaClTargetCodeGenInfo(Types));
    return SetCGInfo(new MIPSTargetCodeGenInfo(Types, /*IsO32=*/true));

  case llvm::Triple::mips64:
  case llvm::Triple::mips64el:
    return SetCGInfo(new MIPSTargetCodeGenInfo(Types, /*IsO32=*/false));

  case llvm::Triple::aarch64:
  case llvm::Triple::aarch64_be: {
    AArch64ABIInfo::ABIKind Kind = AArch64ABIInfo::AAPCS;
    if (getTarget().getABI() == "darwinpcs")
      Kind = AArch64ABIInfo::DarwinPCS;

    return SetCGInfo(new AArch64TargetCodeGenInfo(Types, Kind));
  }

  case llvm::Triple::wasm32:
  case llvm::Triple::wasm64:
    return SetCGInfo(new WebAssemblyTargetCodeGenInfo(Types));

  case llvm::Triple::arm:
  case llvm::Triple::armeb:
  case llvm::Triple::thumb:
  case llvm::Triple::thumbeb: {
    if (Triple.getOS() == llvm::Triple::Win32) {
      return SetCGInfo(
          new WindowsARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS_VFP));
    }

    ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
    StringRef ABIStr = getTarget().getABI();
    if (ABIStr == "apcs-gnu")
      Kind = ARMABIInfo::APCS;
    else if (ABIStr == "aapcs16")
      Kind = ARMABIInfo::AAPCS16_VFP;
    else if (CodeGenOpts.FloatABI == "hard" ||
             (CodeGenOpts.FloatABI != "soft" &&
              (Triple.getEnvironment() == llvm::Triple::GNUEABIHF ||
               Triple.getEnvironment() == llvm::Triple::MuslEABIHF ||
               Triple.getEnvironment() == llvm::Triple::EABIHF)))
      Kind = ARMABIInfo::AAPCS_VFP;

    return SetCGInfo(new ARMTargetCodeGenInfo(Types, Kind));
  }

  case llvm::Triple::ppc:
    return SetCGInfo(
        new PPC32TargetCodeGenInfo(Types, CodeGenOpts.FloatABI == "soft"));
  case llvm::Triple::ppc64:
    if (Triple.isOSBinFormatELF()) {
      PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv1;
      if (getTarget().getABI() == "elfv2")
        Kind = PPC64_SVR4_ABIInfo::ELFv2;
      bool HasQPX = getTarget().getABI() == "elfv1-qpx";

      return SetCGInfo(new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX));
    }
    return SetCGInfo(new PPC64TargetCodeGenInfo(Types));
  case llvm::Triple::ppc64le: {
    assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!");
    PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv2;
    if (getTarget().getABI() == "elfv1" || getTarget().getABI() == "elfv1-qpx")
      Kind = PPC64_SVR4_ABIInfo::ELFv1;
    bool HasQPX = getTarget().getABI() == "elfv1-qpx";

    return SetCGInfo(new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX));
  }

  case llvm::Triple::nvptx:
  case llvm::Triple::nvptx64:
    return SetCGInfo(new NVPTXTargetCodeGenInfo(Types));

  case llvm::Triple::msp430:
    return SetCGInfo(new MSP430TargetCodeGenInfo(Types));

  case llvm::Triple::systemz: {
    bool HasVector = getTarget().getABI() == "vector";
    return SetCGInfo(new SystemZTargetCodeGenInfo(Types, HasVector));
  }

  case llvm::Triple::tce:
    return SetCGInfo(new TCETargetCodeGenInfo(Types));

  case llvm::Triple::x86: {
    bool IsDarwinVectorABI = Triple.isOSDarwin();
    bool RetSmallStructInRegABI =
        X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
    bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing();

    if (Triple.getOS() == llvm::Triple::Win32) {
      return SetCGInfo(new WinX86_32TargetCodeGenInfo(
          Types, IsDarwinVectorABI, RetSmallStructInRegABI,
          IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters));
    } else {
      return SetCGInfo(new X86_32TargetCodeGenInfo(
          Types, IsDarwinVectorABI, RetSmallStructInRegABI,
          IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters,
          CodeGenOpts.FloatABI == "soft"));
    }
  }

  case llvm::Triple::x86_64: {
    StringRef ABI = getTarget().getABI();
    X86AVXABILevel AVXLevel =
        (ABI == "avx512"
             ? X86AVXABILevel::AVX512
             : ABI == "avx" ? X86AVXABILevel::AVX : X86AVXABILevel::None);

    switch (Triple.getOS()) {
    case llvm::Triple::Win32:
      return SetCGInfo(new WinX86_64TargetCodeGenInfo(Types, AVXLevel));
    case llvm::Triple::PS4:
      return SetCGInfo(new PS4TargetCodeGenInfo(Types, AVXLevel));
    default:
      return SetCGInfo(new X86_64TargetCodeGenInfo(Types, AVXLevel));
    }
  }

  case llvm::Triple::hexagon:
    return SetCGInfo(new HexagonTargetCodeGenInfo(Types));
  case llvm::Triple::lanai:
    return SetCGInfo(new LanaiTargetCodeGenInfo(Types));
  case llvm::Triple::r600:
    return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types));
  case llvm::Triple::amdgcn:
    return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types));
  case llvm::Triple::sparc:
    return SetCGInfo(new SparcV8TargetCodeGenInfo(Types));
  case llvm::Triple::sparcv9:
    return SetCGInfo(new SparcV9TargetCodeGenInfo(Types));
  case llvm::Triple::xcore:
    return SetCGInfo(new XCoreTargetCodeGenInfo(Types));
  case llvm::Triple::spir:
  case llvm::Triple::spir64:
    return SetCGInfo(new SPIRTargetCodeGenInfo(Types));
  }
}