//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//
#include "TargetInfo.h"
#include "ABIInfo.h"
#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGValue.h"
#include "CodeGenFunction.h"
#include "clang/AST/RecordLayout.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm> // std::sort
using namespace clang;
using namespace CodeGen;
// Helper for coercing an aggregate argument or return value into an integer
// array of the same size (including padding) and alignment. This alternate
// coercion happens only for the RenderScript ABI and can be removed after
// runtimes that rely on it are no longer supported.
//
// RenderScript assumes that the size of the argument / return value in the IR
// is the same as the size of the corresponding qualified type. This helper
// coerces the aggregate type into an array of the same size (including
// padding). This coercion is used in lieu of expansion of struct members or
// other canonical coercions that return a coerced-type of larger size.
//
// Ty          - The argument / return value type
// Context     - The associated ASTContext
// LLVMContext - The associated LLVMContext
static ABIArgInfo coerceToIntArray(QualType Ty,
                                   ASTContext &Context,
                                   llvm::LLVMContext &LLVMContext) {
  // Alignment and Size are measured in bits.
  const uint64_t Size = Context.getTypeSize(Ty);
  const uint64_t Alignment = Context.getTypeAlign(Ty);
  llvm::Type *IntType = llvm::Type::getIntNTy(LLVMContext, Alignment);
  const uint64_t NumElements = (Size + Alignment - 1) / Alignment;
  return ABIArgInfo::getDirect(llvm::ArrayType::get(IntType, NumElements));
}
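// Worked example (illustrative only): for a type with Size = 96 and
// Alignment = 32, NumElements = (96 + 31) / 32 = 3, so the aggregate is
// coerced to [3 x i32].  With Alignment = 64 instead, NumElements = 2 and the
// coercion is [2 x i64], which rounds the 96-bit value up to 128 bits while
// preserving the ABI alignment.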
static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
                               llvm::Value *Array,
                               llvm::Value *Value,
                               unsigned FirstIndex,
                               unsigned LastIndex) {
  // Alternatively, we could emit this as a loop in the source.
  for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
    llvm::Value *Cell =
        Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I);
    Builder.CreateAlignedStore(Value, Cell, CharUnits::One());
  }
}
static bool isAggregateTypeForABI(QualType T) {
  return !CodeGenFunction::hasScalarEvaluationKind(T) ||
         T->isMemberFunctionPointerType();
}
ABIArgInfo
ABIInfo::getNaturalAlignIndirect(QualType Ty, bool ByRef, bool Realign,
                                 llvm::Type *Padding) const {
  return ABIArgInfo::getIndirect(getContext().getTypeAlignInChars(Ty),
                                 ByRef, Realign, Padding);
}

ABIArgInfo
ABIInfo::getNaturalAlignIndirectInReg(QualType Ty, bool Realign) const {
  return ABIArgInfo::getIndirectInReg(getContext().getTypeAlignInChars(Ty),
                                      /*ByRef*/ false, Realign);
}
Address ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                             QualType Ty) const {
  return Address::invalid();
}

ABIInfo::~ABIInfo() {}
/// Does the given lowering require more than the given number of
/// registers when expanded?
///
/// This is intended to be the basis of a reasonable basic implementation
/// of should{Pass,Return}IndirectlyForSwift.
///
/// For most targets, a limit of four total registers is reasonable; this
/// limits the amount of code required in order to move around the value
/// in case it wasn't produced immediately prior to the call by the caller
/// (or wasn't produced in exactly the right registers) or isn't used
/// immediately within the callee.  But some targets may need to further
/// limit the register count due to an inability to support that many
/// return registers.
static bool occupiesMoreThan(CodeGenTypes &cgt,
                             ArrayRef<llvm::Type*> scalarTypes,
                             unsigned maxAllRegisters) {
  unsigned intCount = 0, fpCount = 0;
  for (llvm::Type *type : scalarTypes) {
    if (type->isPointerTy()) {
      intCount++;
    } else if (auto intTy = dyn_cast<llvm::IntegerType>(type)) {
      auto ptrWidth = cgt.getTarget().getPointerWidth(0);
      intCount += (intTy->getBitWidth() + ptrWidth - 1) / ptrWidth;
    } else {
      assert(type->isVectorTy() || type->isFloatingPointTy());
      fpCount++;
    }
  }

  return (intCount + fpCount > maxAllRegisters);
}
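// Worked example (illustrative only): with 32-bit pointers, the scalar
// sequence { i8*, i64, float } gives intCount = 1 + (64 + 31) / 32 = 3 and
// fpCount = 1; the total of 4 exceeds a limit of 3 but fits exactly within a
// limit of 4.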
bool SwiftABIInfo::isLegalVectorTypeForSwift(CharUnits vectorSize,
                                             llvm::Type *eltTy,
                                             unsigned numElts) const {
  // The default implementation of this assumes that the target guarantees
  // 128-bit SIMD support but nothing more.
  return (vectorSize.getQuantity() > 8 && vectorSize.getQuantity() <= 16);
}
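// Under that assumption, e.g. <4 x float> (16 bytes) is legal, while
// <2 x float> (8 bytes) and <8 x float> (32 bytes) are rejected.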
static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT,
                                              CGCXXABI &CXXABI) {
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!RD)
    return CGCXXABI::RAA_Default;
  return CXXABI.getRecordArgABI(RD);
}

static CGCXXABI::RecordArgABI getRecordArgABI(QualType T,
                                              CGCXXABI &CXXABI) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return CGCXXABI::RAA_Default;
  return getRecordArgABI(RT, CXXABI);
}
/// Pass transparent unions as if they were the type of the first element. Sema
/// should ensure that all elements of the union have the same "machine type".
static QualType useFirstFieldIfTransparentUnion(QualType Ty) {
  if (const RecordType *UT = Ty->getAsUnionType()) {
    const RecordDecl *UD = UT->getDecl();
    if (UD->hasAttr<TransparentUnionAttr>()) {
      assert(!UD->field_empty() && "sema created an empty transparent union");
      return UD->field_begin()->getType();
    }
  }
  return Ty;
}
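// Worked example (illustrative only): a C declaration such as
//
//   typedef union {
//     int *ip;
//     float *fp;
//   } ptr_union __attribute__((transparent_union));
//
// is passed with the machine type of its first field, i.e. as an 'int *'.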
CGCXXABI &ABIInfo::getCXXABI() const {
  return CGT.getCXXABI();
}

ASTContext &ABIInfo::getContext() const {
  return CGT.getContext();
}

llvm::LLVMContext &ABIInfo::getVMContext() const {
  return CGT.getLLVMContext();
}

const llvm::DataLayout &ABIInfo::getDataLayout() const {
  return CGT.getDataLayout();
}

const TargetInfo &ABIInfo::getTarget() const {
  return CGT.getTarget();
}

const CodeGenOptions &ABIInfo::getCodeGenOpts() const {
  return CGT.getCodeGenOpts();
}

bool ABIInfo::isAndroid() const { return getTarget().getTriple().isAndroid(); }
bool ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  return false;
}

bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                uint64_t Members) const {
  return false;
}

bool ABIInfo::shouldSignExtUnsignedType(QualType Ty) const {
  return false;
}
LLVM_DUMP_METHOD void ABIArgInfo::dump() const {
  raw_ostream &OS = llvm::errs();
  OS << "(ABIArgInfo Kind=";
  switch (TheKind) {
  case Direct:
    OS << "Direct Type=";
    if (llvm::Type *Ty = getCoerceToType())
      Ty->print(OS);
    else
      OS << "null";
    break;
  case Extend:
    OS << "Extend";
    break;
  case Ignore:
    OS << "Ignore";
    break;
  case InAlloca:
    OS << "InAlloca Offset=" << getInAllocaFieldIndex();
    break;
  case Indirect:
    OS << "Indirect Align=" << getIndirectAlign().getQuantity()
       << " ByVal=" << getIndirectByVal()
       << " Realign=" << getIndirectRealign();
    break;
  case Expand:
    OS << "Expand";
    break;
  case CoerceAndExpand:
    OS << "CoerceAndExpand Type=";
    getCoerceAndExpandType()->print(OS);
    break;
  }
  OS << ")\n";
}
// Dynamically round a pointer up to a multiple of the given alignment.
static llvm::Value *emitRoundPointerUpToAlignment(CodeGenFunction &CGF,
                                                  llvm::Value *Ptr,
                                                  CharUnits Align) {
  llvm::Value *PtrAsInt = Ptr;
  // OverflowArgArea = (OverflowArgArea + Align - 1) & -Align;
  PtrAsInt = CGF.Builder.CreatePtrToInt(PtrAsInt, CGF.IntPtrTy);
  PtrAsInt = CGF.Builder.CreateAdd(PtrAsInt,
        llvm::ConstantInt::get(CGF.IntPtrTy, Align.getQuantity() - 1));
  PtrAsInt = CGF.Builder.CreateAnd(PtrAsInt,
        llvm::ConstantInt::get(CGF.IntPtrTy, -Align.getQuantity()));
  PtrAsInt = CGF.Builder.CreateIntToPtr(PtrAsInt,
                                        Ptr->getType(),
                                        Ptr->getName() + ".aligned");
  return PtrAsInt;
}
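// Worked example (illustrative only): with Align = 8, a pointer value of
// 0x1003 becomes (0x1003 + 7) & -8 = 0x1008, while an already-aligned 0x1008
// maps to itself.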
/// Emit va_arg for a platform using the common void* representation,
/// where arguments are simply emitted in an array of slots on the stack.
///
/// This version implements the core direct-value passing rules.
///
/// \param SlotSize - The size and alignment of a stack slot.
///   Each argument will be allocated to a multiple of this number of
///   slots, and all the slots will be aligned to this value.
/// \param AllowHigherAlign - The slot alignment is not a cap;
///   an argument type with an alignment greater than the slot size
///   will be emitted on a higher-alignment address, potentially
///   leaving one or more empty slots behind as padding.  If this
///   is false, the returned address might be less-aligned than
///   DirectAlign.
static Address emitVoidPtrDirectVAArg(CodeGenFunction &CGF,
                                      Address VAListAddr,
                                      llvm::Type *DirectTy,
                                      CharUnits DirectSize,
                                      CharUnits DirectAlign,
                                      CharUnits SlotSize,
                                      bool AllowHigherAlign) {
  // Cast the element type to i8* if necessary.  Some platforms define
  // va_list as a struct containing an i8* instead of just an i8*.
  if (VAListAddr.getElementType() != CGF.Int8PtrTy)
    VAListAddr = CGF.Builder.CreateElementBitCast(VAListAddr, CGF.Int8PtrTy);

  llvm::Value *Ptr = CGF.Builder.CreateLoad(VAListAddr, "argp.cur");

  // If the CC aligns values higher than the slot size, do so if needed.
  Address Addr = Address::invalid();
  if (AllowHigherAlign && DirectAlign > SlotSize) {
    Addr = Address(emitRoundPointerUpToAlignment(CGF, Ptr, DirectAlign),
                   DirectAlign);
  } else {
    Addr = Address(Ptr, SlotSize);
  }

  // Advance the pointer past the argument, then store that back.
  CharUnits FullDirectSize = DirectSize.alignTo(SlotSize);
  llvm::Value *NextPtr =
      CGF.Builder.CreateConstInBoundsByteGEP(Addr.getPointer(), FullDirectSize,
                                             "argp.next");
  CGF.Builder.CreateStore(NextPtr, VAListAddr);

  // If the argument is smaller than a slot, and this is a big-endian
  // target, the argument will be right-adjusted in its slot.
  if (DirectSize < SlotSize && CGF.CGM.getDataLayout().isBigEndian() &&
      !DirectTy->isStructTy()) {
    Addr = CGF.Builder.CreateConstInBoundsByteGEP(Addr, SlotSize - DirectSize);
  }

  Addr = CGF.Builder.CreateElementBitCast(Addr, DirectTy);
  return Addr;
}
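// Worked example (illustrative only): with a 4-byte slot, an 8-byte,
// 8-aligned argument under AllowHigherAlign first rounds argp.cur up to a
// multiple of 8 and then advances it by 8.  A 1-byte argument still advances
// argp.cur by a full 4-byte slot, and on a big-endian target its address is
// additionally bumped by 3 bytes so the value is read from the slot's
// low-order end.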
/// Emit va_arg for a platform using the common void* representation,
/// where arguments are simply emitted in an array of slots on the stack.
///
/// \param IsIndirect - Values of this type are passed indirectly.
/// \param ValueInfo - The size and alignment of this type, generally
///   computed with getContext().getTypeInfoInChars(ValueTy).
/// \param SlotSizeAndAlign - The size and alignment of a stack slot.
///   Each argument will be allocated to a multiple of this number of
///   slots, and all the slots will be aligned to this value.
/// \param AllowHigherAlign - The slot alignment is not a cap;
///   an argument type with an alignment greater than the slot size
///   will be emitted on a higher-alignment address, potentially
///   leaving one or more empty slots behind as padding.
static Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType ValueTy, bool IsIndirect,
                                std::pair<CharUnits, CharUnits> ValueInfo,
                                CharUnits SlotSizeAndAlign,
                                bool AllowHigherAlign) {
  // The size and alignment of the value that was passed directly.
  CharUnits DirectSize, DirectAlign;
  if (IsIndirect) {
    DirectSize = CGF.getPointerSize();
    DirectAlign = CGF.getPointerAlign();
  } else {
    DirectSize = ValueInfo.first;
    DirectAlign = ValueInfo.second;
  }

  // Cast the address we've calculated to the right type.
  llvm::Type *DirectTy = CGF.ConvertTypeForMem(ValueTy);
  if (IsIndirect)
    DirectTy = DirectTy->getPointerTo(0);

  Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, DirectTy,
                                        DirectSize, DirectAlign,
                                        SlotSizeAndAlign,
                                        AllowHigherAlign);

  if (IsIndirect) {
    Addr = Address(CGF.Builder.CreateLoad(Addr), ValueInfo.second);
  }

  return Addr;
}
static Address emitMergePHI(CodeGenFunction &CGF,
                            Address Addr1, llvm::BasicBlock *Block1,
                            Address Addr2, llvm::BasicBlock *Block2,
                            const llvm::Twine &Name = "") {
  assert(Addr1.getType() == Addr2.getType());
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(Addr1.getType(), 2, Name);
  PHI->addIncoming(Addr1.getPointer(), Block1);
  PHI->addIncoming(Addr2.getPointer(), Block2);
  CharUnits Align = std::min(Addr1.getAlignment(), Addr2.getAlignment());
  return Address(PHI, Align);
}
TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }

// If someone can figure out a general rule for this, that would be great.
// It's probably just doomed to be platform-dependent, though.
unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
  // Verified for:
  //   x86-64     FreeBSD, Linux, Darwin
  //   x86-32     FreeBSD, Linux, Darwin
  //   PowerPC    Linux, Darwin
  //   ARM        Darwin (*not* EABI)
  //   AArch64    Linux
  return 32;
}
bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args,
                                     const FunctionNoProtoType *fnType) const {
  // The following conventions are known to require this to be false:
  //   x86_stdcall
  //   x86_fastcall
  // For everything else, we just prefer false unless we opt out.
  return false;
}
void
TargetCodeGenInfo::getDependentLibraryOption(llvm::StringRef Lib,
                                             llvm::SmallString<24> &Opt) const {
  // This assumes the user is passing a library name like "rt" instead of a
  // filename like "librt.a/so", and that they don't care whether it's static
  // or dynamic.
  Opt = "-l";
  Opt += Lib;
}
unsigned TargetCodeGenInfo::getOpenCLKernelCallingConv() const {
  // OpenCL kernels are called via an explicit runtime API with arguments
  // set with clSetKernelArg(), not as normal sub-functions.
  // Return SPIR_KERNEL by default as the kernel calling convention to
  // ensure the fingerprint is fixed in such a way that each OpenCL argument
  // gets one matching argument in the produced kernel function argument
  // list; this makes a feasible implementation of clSetKernelArg() possible,
  // even with aggregates.  If we used the default C calling convention here,
  // clSetKernelArg() might break, depending on the target-specific
  // conventions; different targets might split structs passed as values
  // to multiple function arguments etc.
  return llvm::CallingConv::SPIR_KERNEL;
}
llvm::Constant *TargetCodeGenInfo::getNullPointer(const CodeGen::CodeGenModule &CGM,
    llvm::PointerType *T, QualType QT) const {
  return llvm::ConstantPointerNull::get(T);
}
LangAS TargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
                                                   const VarDecl *D) const {
  assert(!CGM.getLangOpts().OpenCL &&
         !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) &&
         "Address space agnostic languages only");
  return D ? D->getType().getAddressSpace() : LangAS::Default;
}
llvm::Value *TargetCodeGenInfo::performAddrSpaceCast(
    CodeGen::CodeGenFunction &CGF, llvm::Value *Src, LangAS SrcAddr,
    LangAS DestAddr, llvm::Type *DestTy, bool isNonNull) const {
  // Since the target may map different address spaces in the AST to the same
  // address space, an address space conversion may end up as a bitcast.
  if (auto *C = dyn_cast<llvm::Constant>(Src))
    return performAddrSpaceCast(CGF.CGM, C, SrcAddr, DestAddr, DestTy);
  return CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DestTy);
}

llvm::Constant *
TargetCodeGenInfo::performAddrSpaceCast(CodeGenModule &CGM, llvm::Constant *Src,
                                        LangAS SrcAddr, LangAS DestAddr,
                                        llvm::Type *DestTy) const {
  // Since the target may map different address spaces in the AST to the same
  // address space, an address space conversion may end up as a bitcast.
  return llvm::ConstantExpr::getPointerCast(Src, DestTy);
}

llvm::SyncScope::ID
TargetCodeGenInfo::getLLVMSyncScopeID(SyncScope S, llvm::LLVMContext &C) const {
  return C.getOrInsertSyncScopeID(""); /* default sync scope */
}
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);
/// isEmptyField - Return true iff the field is "empty", that is, it
/// is an unnamed bit-field or an (array of) empty record(s).
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
                         bool AllowArrays) {
  if (FD->isUnnamedBitfield())
    return true;

  QualType FT = FD->getType();

  // Constant arrays of empty records count as empty, strip them off.
  // Constant arrays of zero length always count as empty.
  if (AllowArrays)
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize() == 0)
        return true;
      FT = AT->getElementType();
    }

  const RecordType *RT = FT->getAs<RecordType>();
  if (!RT)
    return false;

  // C++ record fields are never empty, at least in the Itanium ABI.
  //
  // FIXME: We should use a predicate for whether this behavior is true in the
  // current ABI.
  if (isa<CXXRecordDecl>(RT->getDecl()))
    return false;

  return isEmptyRecord(Context, FT, AllowArrays);
}
/// isEmptyRecord - Return true iff a structure contains only empty
/// fields.  Note that a structure with a flexible array member is not
/// considered empty.
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())
      if (!isEmptyRecord(Context, I.getType(), true))
        return false;

  for (const auto *I : RD->fields())
    if (!isEmptyField(Context, I, AllowArrays))
      return false;
  return true;
}
/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The type of the single non-empty field, if it exists.
static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return nullptr;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return nullptr;

  const Type *Found = nullptr;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (const auto &I : CXXRD->bases()) {
      // Ignore empty records.
      if (isEmptyRecord(Context, I.getType(), true))
        continue;

      // If we already found an element then this isn't a single-element struct.
      if (Found)
        return nullptr;

      // If this is non-empty and not a single element struct, the composite
      // cannot be a single element struct.
      Found = isSingleElementStruct(I.getType(), Context);
      if (!Found)
        return nullptr;
    }
  }

  // Check for single element.
  for (const auto *FD : RD->fields()) {
    QualType FT = FD->getType();

    // Ignore empty fields.
    if (isEmptyField(Context, FD, true))
      continue;

    // If we already found an element then this isn't a single-element
    // struct.
    if (Found)
      return nullptr;

    // Treat single element arrays as the element.
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize().getZExtValue() != 1)
        break;
      FT = AT->getElementType();
    }

    if (!isAggregateTypeForABI(FT)) {
      Found = FT.getTypePtr();
    } else {
      Found = isSingleElementStruct(FT, Context);
      if (!Found)
        return nullptr;
    }
  }

  // We don't consider a struct a single-element struct if it has
  // padding beyond the element type.
  if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
    return nullptr;

  return Found;
}
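// Worked examples (illustrative only): 'struct A { double d; };' and
// 'struct B { struct A a; };' are single-element structs with element type
// 'double'.  'struct C { double d; int i; };' is not (two non-empty fields),
// and 'struct D { int i; } __attribute__((aligned(8)))' is not, because its
// 8-byte size exceeds the 4-byte size of its only element.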
Address EmitVAArgInstr(CodeGenFunction &CGF, Address VAListAddr, QualType Ty,
                       const ABIArgInfo &AI) {
  // This default implementation defers to the llvm backend's va_arg
  // instruction. It can handle only passing arguments directly
  // (typically only handled in the backend for primitive types), or
  // aggregates passed indirectly by pointer (NOTE: if the "byval"
  // flag has ABI impact in the callee, this implementation cannot
  // work.)
  //
  // Only a few cases are covered here at the moment -- those needed
  // by the default abi.
  llvm::Value *Val;

  if (AI.isIndirect()) {
    assert(!AI.getPaddingType() &&
           "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
    assert(
        !AI.getIndirectRealign() &&
        "Unexpected IndirectRealign seen in arginfo in generic VAArg emitter!");

    auto TyInfo = CGF.getContext().getTypeInfoInChars(Ty);
    CharUnits TyAlignForABI = TyInfo.second;

    llvm::Type *BaseTy =
        llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
    llvm::Value *Addr =
        CGF.Builder.CreateVAArg(VAListAddr.getPointer(), BaseTy);
    return Address(Addr, TyAlignForABI);
  } else {
    assert((AI.isDirect() || AI.isExtend()) &&
           "Unexpected ArgInfo Kind in generic VAArg emitter!");

    assert(!AI.getInReg() &&
           "Unexpected InReg seen in arginfo in generic VAArg emitter!");
    assert(!AI.getPaddingType() &&
           "Unexpected PaddingType seen in arginfo in generic VAArg emitter!");
    assert(!AI.getDirectOffset() &&
           "Unexpected DirectOffset seen in arginfo in generic VAArg emitter!");
    assert(!AI.getCoerceToType() &&
           "Unexpected CoerceToType seen in arginfo in generic VAArg emitter!");

    Address Temp = CGF.CreateMemTemp(Ty, "varet");
    Val = CGF.Builder.CreateVAArg(VAListAddr.getPointer(), CGF.ConvertType(Ty));
    CGF.Builder.CreateStore(Val, Temp);
    return Temp;
  }
}
/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
class DefaultABIInfo : public ABIInfo {
public:
  DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type);
  }

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override {
    return EmitVAArgInstr(CGF, VAListAddr, Ty, classifyArgumentType(Ty));
  }
};

class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
};
ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // passed by value.
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

    return getNaturalAlignIndirect(Ty);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy))
    return getNaturalAlignIndirect(RetTy);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
//===----------------------------------------------------------------------===//
// WebAssembly ABI Implementation
//
// This is a very simple ABI that relies a lot on DefaultABIInfo.
//===----------------------------------------------------------------------===//
class WebAssemblyABIInfo final : public DefaultABIInfo {
public:
  explicit WebAssemblyABIInfo(CodeGen::CodeGenTypes &CGT)
      : DefaultABIInfo(CGT) {}

private:
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  // DefaultABIInfo's classifyReturnType and classifyArgumentType are
  // non-virtual, but computeInfo and EmitVAArg are virtual, so we
  // overload them.
  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &Arg : FI.arguments())
      Arg.info = classifyArgumentType(Arg.type);
  }

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
};

class WebAssemblyTargetCodeGenInfo final : public TargetCodeGenInfo {
public:
  explicit WebAssemblyTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(new WebAssemblyABIInfo(CGT)) {}
};
/// \brief Classify argument of given type \p Ty.
ABIArgInfo WebAssemblyABIInfo::classifyArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // passed by value.
    if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();
    // Lower single-element structs to just pass a regular value.  TODO: We
    // could do reasonable-size multiple-element structs too, using
    // getExpand(), though watch out for things like bitfields.
    if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
      return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
  }

  // Otherwise just do the default thing.
  return DefaultABIInfo::classifyArgumentType(Ty);
}
ABIArgInfo WebAssemblyABIInfo::classifyReturnType(QualType RetTy) const {
  if (isAggregateTypeForABI(RetTy)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // returned by value.
    if (!getRecordArgABI(RetTy, getCXXABI())) {
      // Ignore empty structs/unions.
      if (isEmptyRecord(getContext(), RetTy, true))
        return ABIArgInfo::getIgnore();
      // Lower single-element structs to just return a regular value.  TODO:
      // We could do reasonable-size multiple-element structs too, using
      // ABIArgInfo::getDirect().
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
    }
  }

  // Otherwise just do the default thing.
  return DefaultABIInfo::classifyReturnType(RetTy);
}
Address WebAssemblyABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                      QualType Ty) const {
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*IsIndirect=*/ false,
                          getContext().getTypeInfoInChars(Ty),
                          CharUnits::fromQuantity(4),
                          /*AllowHigherAlign=*/ true);
}
//===----------------------------------------------------------------------===//
// le32/PNaCl bitcode ABI Implementation
//
// This is a simplified version of the x86_32 ABI.  Arguments and return values
// are always passed on the stack.
//===----------------------------------------------------------------------===//
class PNaClABIInfo : public ABIInfo {
public:
  PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override;
  Address EmitVAArg(CodeGenFunction &CGF,
                    Address VAListAddr, QualType Ty) const override;
};

class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(new PNaClABIInfo(CGT)) {}
};
void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type);
}
Address PNaClABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty) const {
  // The PNaCL ABI is a bit odd, in that varargs don't use normal
  // function classification. Structs get passed directly for varargs
  // functions, through a rewriting transform in
  // pnacl-llvm/lib/Transforms/NaCl/ExpandVarArgs.cpp, which allows
  // this target to actually support a va_arg instruction with an
  // aggregate type, unlike other targets.
  return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());
}
/// \brief Classify argument of given type \p Ty.
ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty)) {
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
    return getNaturalAlignIndirect(Ty);
  } else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
    // Treat an enum type as its underlying type.
    Ty = EnumTy->getDecl()->getIntegerType();
  } else if (Ty->isFloatingType()) {
    // Floating-point types don't go inreg.
    return ABIArgInfo::getDirect();
  }

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // In the PNaCl ABI we always return records/structures on the stack.
  if (isAggregateTypeForABI(RetTy))
    return getNaturalAlignIndirect(RetTy);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
/// IsX86_MMXType - Return true if this is an MMX type.
bool IsX86_MMXType(llvm::Type *IRType) {
  // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>.
  return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
    cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
    IRType->getScalarSizeInBits() != 64;
}
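// For example, <2 x i32>, <4 x i16>, and <8 x i8> are MMX types here, while
// <1 x i64> is not (its scalar size is 64) and <2 x float> is not (its
// elements are not integers).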
static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                          StringRef Constraint,
                                          llvm::Type* Ty) {
  bool IsMMXCons = llvm::StringSwitch<bool>(Constraint)
                     .Cases("y", "&y", "^Ym", true)
                     .Default(false);
  if (IsMMXCons && Ty->isVectorTy()) {
    if (cast<llvm::VectorType>(Ty)->getBitWidth() != 64) {
      // Invalid MMX constraint
      return nullptr;
    }

    return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
  }

  // No operation needed
  return Ty;
}
/// Returns true if this type can be passed in SSE registers with the
/// X86_VectorCall calling convention. Shared between x86_32 and x86_64.
static bool isX86VectorTypeForVectorCall(ASTContext &Context, QualType Ty) {
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half) {
      if (BT->getKind() == BuiltinType::LongDouble) {
        if (&Context.getTargetInfo().getLongDoubleFormat() ==
            &llvm::APFloat::x87DoubleExtended())
          return false;
      }
      return true;
    }
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // vectorcall can pass XMM, YMM, and ZMM vectors. We don't pass SSE1 MMX
    // registers specially.
    unsigned VecSize = Context.getTypeSize(VT);
    if (VecSize == 128 || VecSize == 256 || VecSize == 512)
      return true;
  }
  return false;
}

/// Returns true if this aggregate is small enough to be passed in SSE
/// registers in the X86_VectorCall calling convention. Shared between x86_32
/// and x86_64.
static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
  return NumMembers <= 4;
}
/// Returns a Homogeneous Vector Aggregate ABIArgInfo, used in X86.
static ABIArgInfo getDirectX86Hva(llvm::Type* T = nullptr) {
  auto AI = ABIArgInfo::getDirect(T);
  AI.setInReg(true);
  AI.setCanBeFlattened(false);
  return AI;
}
//===----------------------------------------------------------------------===//
// X86-32 ABI Implementation
//===----------------------------------------------------------------------===//

/// \brief Similar to llvm::CCState, but for Clang.
struct CCState {
  CCState(unsigned CC) : CC(CC), FreeRegs(0), FreeSSERegs(0) {}

  unsigned CC;
  unsigned FreeRegs;
  unsigned FreeSSERegs;
};

enum {
  // Vectorcall only allows the first 6 parameters to be passed in registers.
  VectorcallMaxParamNumAsReg = 6
};
/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public SwiftABIInfo {
  enum Class {
    Integer,
    Float
  };

  static const unsigned MinABIStackAlignInBytes = 4;

  bool IsDarwinVectorABI;
  bool IsRetSmallStructInRegABI;
  bool IsWin32StructABI;
  bool IsSoftFloatABI;
  bool IsMCUABI;
  unsigned DefaultNumRegisterParameters;

  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }

  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorTypeForVectorCall(getContext(), Ty);
  }

  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t NumMembers) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorCallAggregateSmallEnough(NumMembers);
  }

  bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context) const;

  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;

  ABIArgInfo getIndirectReturnResult(QualType Ty, CCState &State) const;

  /// \brief Return the alignment to use for the given type on the stack.
  unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;

  Class classify(QualType Ty) const;
  ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const;
  ABIArgInfo classifyArgumentType(QualType Ty, CCState &State) const;

  /// \brief Updates the number of available free registers, returns
  /// true if any registers were allocated.
  bool updateFreeRegs(QualType Ty, CCState &State) const;

  bool shouldAggregateUseDirect(QualType Ty, CCState &State, bool &InReg,
                                bool &NeedsPadding) const;
  bool shouldPrimitiveUseInReg(QualType Ty, CCState &State) const;

  bool canExpandIndirectArgument(QualType Ty) const;

  /// \brief Rewrite the function info so that all memory arguments use
  /// inalloca.
  void rewriteWithInAlloca(CGFunctionInfo &FI) const;

  void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
                           CharUnits &StackOffset, ABIArgInfo &Info,
                           QualType Type) const;
  void computeVectorCallArgs(CGFunctionInfo &FI, CCState &State,
                             bool &UsedInAlloca) const;

public:

  void computeInfo(CGFunctionInfo &FI) const override;
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
                bool RetSmallStructInRegABI, bool Win32StructABI,
                unsigned NumRegisterParameters, bool SoftFloatABI)
      : SwiftABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
        IsRetSmallStructInRegABI(RetSmallStructInRegABI),
        IsWin32StructABI(Win32StructABI),
        IsSoftFloatABI(SoftFloatABI),
        IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()),
        DefaultNumRegisterParameters(NumRegisterParameters) {}

  bool shouldPassIndirectlyForSwift(CharUnits totalSize,
                                    ArrayRef<llvm::Type*> scalars,
                                    bool asReturnValue) const override {
    // LLVM's x86-32 lowering currently only assigns up to three
    // integer registers and three fp registers.  Oddly, it'll use up to
    // four vector registers for vectors, but those can overlap with the
    // scalar registers.
    return occupiesMoreThan(CGT, scalars, /*total*/ 3);
  }

  bool isSwiftErrorInRegister() const override {
    // x86-32 lowering does not support passing swifterror in a register.
    return false;
  }
};
class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
                          bool RetSmallStructInRegABI, bool Win32StructABI,
                          unsigned NumRegisterParameters, bool SoftFloatABI)
      : TargetCodeGenInfo(new X86_32ABIInfo(
            CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
            NumRegisterParameters, SoftFloatABI)) {}

  static bool isStructReturnInRegABI(
      const llvm::Triple &Triple, const CodeGenOptions &Opts);

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM,
                           ForDefinition_t IsForDefinition) const override;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    // Darwin uses different dwarf register numbers for EH.
    if (CGM.getTarget().getTriple().isOSDarwin()) return 5;
    return 4;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  void addReturnRegisterOutputs(CodeGenFunction &CGF, LValue ReturnValue,
                                std::string &Constraints,
                                std::vector<llvm::Type *> &ResultRegTypes,
                                std::vector<llvm::Type *> &ResultTruncRegTypes,
                                std::vector<LValue> &ResultRegDests,
                                std::string &AsmString,
                                unsigned NumOutputs) const override;

  llvm::Constant *
  getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
    unsigned Sig = (0xeb << 0) | // jmp rel8
                   (0x06 << 8) | //           .+0x08
                   ('F' << 16) |
                   ('T' << 24);
    return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
  }

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "movl\t%ebp, %ebp"
           "\t\t// marker for objc_retainAutoreleaseReturnValue";
  }
};
/// Rewrite input constraint references after adding some output constraints.
/// In the case where there is one output and one input and we add one output,
/// we need to replace all operand references greater than or equal to 1:
///     map $0 -> $0
///     map $1 -> $2
/// The result will be:
///     "=r,=r,r"
///     "$0 = $1, $2 $3"
static void rewriteInputConstraintReferences(unsigned FirstIn,
                                             unsigned NumNewOuts,
                                             std::string &AsmString) {
  std::string Buf;
  llvm::raw_string_ostream OS(Buf);
  size_t Pos = 0;
  while (Pos < AsmString.size()) {
    size_t DollarStart = AsmString.find('$', Pos);
    if (DollarStart == std::string::npos)
      DollarStart = AsmString.size();
    size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart);
    if (DollarEnd == std::string::npos)
      DollarEnd = AsmString.size();
    OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
    Pos = DollarEnd;
    size_t NumDollars = DollarEnd - DollarStart;
    if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
      // We have an operand reference.
      size_t DigitStart = Pos;
      size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart);
      if (DigitEnd == std::string::npos)
        DigitEnd = AsmString.size();
      StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
      unsigned OperandIndex;
      if (!OperandStr.getAsInteger(10, OperandIndex)) {
        if (OperandIndex >= FirstIn)
          OperandIndex += NumNewOuts;
        OS << OperandIndex;
      } else {
        OS << OperandStr;
      }
      Pos = DigitEnd;
    }
  }
  AsmString = std::move(OS.str());
}
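// Worked example (illustrative only): with FirstIn = 1 and NumNewOuts = 1,
// the string "$0 = $1" becomes "$0 = $2", while the escaped "$$1" is left
// alone because its even run of dollar signs does not form an operand
// reference.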
/// Add output constraints for EAX:EDX because they are return registers.
void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
    CodeGenFunction &CGF, LValue ReturnSlot, std::string &Constraints,
    std::vector<llvm::Type *> &ResultRegTypes,
    std::vector<llvm::Type *> &ResultTruncRegTypes,
    std::vector<LValue> &ResultRegDests, std::string &AsmString,
    unsigned NumOutputs) const {
  uint64_t RetWidth = CGF.getContext().getTypeSize(ReturnSlot.getType());

  // Use the EAX constraint if the width is 32 or smaller and EAX:EDX if it is
  // larger.
  if (!Constraints.empty())
    Constraints += ',';
  if (RetWidth <= 32) {
    Constraints += "={eax}";
    ResultRegTypes.push_back(CGF.Int32Ty);
  } else {
    // Use the 'A' constraint for EAX:EDX.
    Constraints += "=A";
    ResultRegTypes.push_back(CGF.Int64Ty);
  }

  // Truncate EAX or EAX:EDX to an integer of the appropriate size.
  llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth);
  ResultTruncRegTypes.push_back(CoerceTy);

  // Coerce the integer by bitcasting the return slot pointer.
  ReturnSlot.setAddress(CGF.Builder.CreateBitCast(ReturnSlot.getAddress(),
                                                  CoerceTy->getPointerTo()));
  ResultRegDests.push_back(ReturnSlot);

  rewriteInputConstraintReferences(NumOutputs, 1, AsmString);
}
/// shouldReturnTypeInRegister - Determine if the given type should be
/// returned in a register (for the Darwin and MCU ABI).
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
                                               ASTContext &Context) const {
  uint64_t Size = Context.getTypeSize(Ty);

  // For i386, the type must be register sized.
  // For the MCU ABI, it only needs to be <= 8 bytes.
  if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))
    return false;

  if (Ty->isVectorType()) {
    // 64- and 128-bit vectors inside structures are not returned in
    // registers.
    if (Size == 64 || Size == 128)
      return false;

    return true;
  }

  // If this is a builtin, pointer, enum, complex type, member pointer, or
  // member function pointer it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
      Ty->isAnyComplexType() || Ty->isEnumeralType() ||
      Ty->isBlockPointerType() || Ty->isMemberPointerType())
    return true;

  // Arrays are treated like records.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
    return shouldReturnTypeInRegister(AT->getElementType(), Context);

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // FIXME: Traverse bases here too.

  // Structure types are passed in register if all fields would be
  // passed in a register.
  for (const auto *FD : RT->getDecl()->fields()) {
    // Empty fields are ignored.
    if (isEmptyField(Context, FD, true))
      continue;

    // Check fields recursively.
    if (!shouldReturnTypeInRegister(FD->getType(), Context))
      return false;
  }
  return true;
}
static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
  // Treat complex types as the element type.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
    Ty = CTy->getElementType();

  // Check for a type which we know has a simple scalar argument-passing
  // convention without any padding.  (We're specifically looking for 32-
  // and 64-bit integer and integer-equivalents, float, and double.)
  if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
      !Ty->isEnumeralType() && !Ty->isBlockPointerType())
    return false;

  uint64_t Size = Context.getTypeSize(Ty);
  return Size == 32 || Size == 64;
}
static bool addFieldSizes(ASTContext &Context, const RecordDecl *RD,
                          uint64_t &Size) {
  for (const auto *FD : RD->fields()) {
    // Scalar arguments on the stack get 4 byte alignment on x86. If the
    // argument is smaller than 32-bits, expanding the struct will create
    // alignment padding.
    if (!is32Or64BitBasicType(FD->getType(), Context))
      return false;

    // FIXME: Reject bit-fields wholesale; there are two problems: we don't
    // know how to expand them yet, and the predicate for telling if a
    // bitfield still counts as "basic" is more complicated than what we were
    // doing previously.
    if (FD->isBitField())
      return false;

    Size += Context.getTypeSize(FD->getType());
  }
  return true;
}
static bool addBaseAndFieldSizes(ASTContext &Context, const CXXRecordDecl *RD,
                                 uint64_t &Size) {
  // Don't do this if there are any non-empty bases.
  for (const CXXBaseSpecifier &Base : RD->bases()) {
    if (!addBaseAndFieldSizes(Context, Base.getType()->getAsCXXRecordDecl(),
                              Size))
      return false;
  }
  if (!addFieldSizes(Context, RD, Size))
    return false;
  return true;
}
/// Test whether an argument type which is to be passed indirectly (on the
/// stack) would have the equivalent layout if it was expanded into separate
/// arguments.  If so, we prefer to do the latter to avoid inhibiting
/// optimizations.
bool X86_32ABIInfo::canExpandIndirectArgument(QualType Ty) const {
  // We can only expand structure types.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  uint64_t Size = 0;
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    if (!IsWin32StructABI) {
      // On non-Windows, we have to conservatively match our old bitcode
      // prototypes in order to be ABI-compatible at the bitcode level.
      if (!CXXRD->isCLike())
        return false;
    } else {
      // Don't do this for dynamic classes.
      if (CXXRD->isDynamicClass())
        return false;
    }
    if (!addBaseAndFieldSizes(getContext(), CXXRD, Size))
      return false;
  } else {
    if (!addFieldSizes(getContext(), RD, Size))
      return false;
  }

  // We can do this if there was no alignment padding.
  return Size == getContext().getTypeSize(Ty);
}
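// Worked examples (illustrative only): 'struct S { int a; float b; };'
// consists only of 32/64-bit basic types and has no padding (Size == 64), so
// it can be expanded; 'struct T { char c; int i; };' cannot, because 'char'
// is not a 32- or 64-bit basic type.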
ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(QualType RetTy,
                                                  CCState &State) const {
  // If the return value is indirect, then the hidden argument is consuming
  // one integer register.
  if (State.FreeRegs) {
    --State.FreeRegs;
    if (!IsMCUABI)
      return getNaturalAlignIndirectInReg(RetTy);
  }
  return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
}
ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
                                             CCState &State) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  if ((State.CC == llvm::CallingConv::X86_VectorCall ||
       State.CC == llvm::CallingConv::X86_RegCall) &&
      isHomogeneousAggregate(RetTy, Base, NumElts)) {
    // The LLVM struct type for such an aggregate should lower properly.
    return ABIArgInfo::getDirect();
  }

  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // On Darwin, some vectors are returned in registers.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // 128-bit vectors are a special case; they are returned in
      // registers and we need to make sure to pick a type the LLVM
      // backend will like.
      if (Size == 128)
        return ABIArgInfo::getDirect(llvm::VectorType::get(
                  llvm::Type::getInt64Ty(getVMContext()), 2));

      // Always return in register if it fits in a general purpose
      // register, or if it is 64 bits and has a single element.
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));

      return getIndirectReturnResult(RetTy, State);
    }

    return ABIArgInfo::getDirect();
  }

  if (isAggregateTypeForABI(RetTy)) {
    if (const RecordType *RT = RetTy->getAs<RecordType>()) {
      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectReturnResult(RetTy, State);
    }

    // If specified, structs and unions are always indirect.
    if (!IsRetSmallStructInRegABI && !RetTy->isAnyComplexType())
      return getIndirectReturnResult(RetTy, State);

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), RetTy, true))
      return ABIArgInfo::getIgnore();

    // Small structures which are register sized are generally returned
    // in a register.
    if (shouldReturnTypeInRegister(RetTy, getContext())) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // As a special-case, if the struct is a "single-element" struct, and
      // the field is of type "float" or "double", return it in a
      // floating-point register. (MSVC does not apply this special case.)
      // We apply a similar transformation for pointer types to improve the
      // quality of the generated IR.
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
            || SeltTy->hasPointerRepresentation())
          return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

      // FIXME: We should be able to narrow this integer in cases with dead
      // padding.
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size));
    }

    return getIndirectReturnResult(RetTy, State);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
static bool isSSEVectorType(ASTContext &Context, QualType Ty) {
  return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
}

static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())
      if (!isRecordWithSSEVectorType(Context, I.getType()))
        return false;

  for (const auto *i : RD->fields()) {
    QualType FT = i->getType();

    if (isSSEVectorType(Context, FT))
      return true;

    if (isRecordWithSSEVectorType(Context, FT))
      return true;
  }

  return false;
}
unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
                                                 unsigned Align) const {
  // If the alignment is less than or equal to the minimum ABI alignment,
  // just use the default; the backend will handle this.
  if (Align <= MinABIStackAlignInBytes)
    return 0; // Use default alignment.

  // On non-Darwin, the stack type alignment is always 4.
  if (!IsDarwinVectorABI) {
    // Set explicit alignment, since we may need to realign the top.
    return MinABIStackAlignInBytes;
  }

  // Otherwise, if the type contains an SSE vector type, the alignment is 16.
  if (Align >= 16 && (isSSEVectorType(getContext(), Ty) ||
                      isRecordWithSSEVectorType(getContext(), Ty)))
    return 16;

  return MinABIStackAlignInBytes;
}
ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
                                            CCState &State) const {
  if (!ByVal) {
    if (State.FreeRegs) {
      --State.FreeRegs; // Non-byval indirects just use one pointer.
      if (!IsMCUABI)
        return getNaturalAlignIndirectInReg(Ty);
    }
    return getNaturalAlignIndirect(Ty, false);
  }

  // Compute the byval alignment.
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
  if (StackAlign == 0)
    return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true);

  // If the stack alignment is less than the type alignment, realign the
  // argument.
  bool Realign = TypeAlign > StackAlign;
  return ABIArgInfo::getIndirect(CharUnits::fromQuantity(StackAlign),
                                 /*ByVal=*/true, Realign);
}
X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
  const Type *T = isSingleElementStruct(Ty, getContext());
  if (!T)
    T = Ty.getTypePtr();

  if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
    BuiltinType::Kind K = BT->getKind();
    if (K == BuiltinType::Float || K == BuiltinType::Double)
      return Float;
  }
  return Integer;
}
bool X86_32ABIInfo::updateFreeRegs(QualType Ty, CCState &State) const {
  if (!IsSoftFloatABI) {
    Class C = classify(Ty);
    if (C == Float)
      return false;
  }

  unsigned Size = getContext().getTypeSize(Ty);
  unsigned SizeInRegs = (Size + 31) / 32;

  if (SizeInRegs == 0)
    return false;

  if (!IsMCUABI) {
    if (SizeInRegs > State.FreeRegs) {
      State.FreeRegs = 0;
      return false;
    }
  } else {
    // The MCU psABI allows passing parameters in-reg even if there are
    // earlier parameters that are passed on the stack. Also,
    // it does not allow passing >8-byte structs in-register,
    // even if there are 3 free registers available.
    if (SizeInRegs > State.FreeRegs || SizeInRegs > 2)
      return false;
  }

  State.FreeRegs -= SizeInRegs;
  return true;
}
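// Worked example (illustrative only): a 12-byte struct has
// SizeInRegs = (96 + 31) / 32 = 3, so it is rejected when only fastcall's 2
// registers are free; under the MCU ABI it is always rejected by the
// SizeInRegs > 2 limit, however many registers remain.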
bool X86_32ABIInfo::shouldAggregateUseDirect(QualType Ty, CCState &State,
                                             bool &InReg,
                                             bool &NeedsPadding) const {
  // On Windows, aggregates other than HFAs are never passed in registers, and
  // they do not consume register slots. Homogeneous floating-point aggregates
  // (HFAs) have already been dealt with at this point.
  if (IsWin32StructABI && isAggregateTypeForABI(Ty))
    return false;

  NeedsPadding = false;
  InReg = !IsMCUABI;

  if (!updateFreeRegs(Ty, State))
    return false;

  if (IsMCUABI)
    return true;

  if (State.CC == llvm::CallingConv::X86_FastCall ||
      State.CC == llvm::CallingConv::X86_VectorCall ||
      State.CC == llvm::CallingConv::X86_RegCall) {
    if (getContext().getTypeSize(Ty) <= 32 && State.FreeRegs)
      NeedsPadding = true;

    return false;
  }

  return true;
}
bool X86_32ABIInfo::shouldPrimitiveUseInReg(QualType Ty, CCState &State) const {
  if (!updateFreeRegs(Ty, State))
    return false;

  if (IsMCUABI)
    return false;

  if (State.CC == llvm::CallingConv::X86_FastCall ||
      State.CC == llvm::CallingConv::X86_VectorCall ||
      State.CC == llvm::CallingConv::X86_RegCall) {
    if (getContext().getTypeSize(Ty) > 32)
      return false;

    return (Ty->isIntegralOrEnumerationType() || Ty->isPointerType() ||
            Ty->isReferenceType());
  }

  return true;
}
ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
                                               CCState &State) const {
  // FIXME: Set alignment on indirect arguments.

  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Check with the C++ ABI first.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (RT) {
    CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
    if (RAA == CGCXXABI::RAA_Indirect) {
      return getIndirectResult(Ty, false, State);
    } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
      // The field index doesn't matter, we'll fix it up later.
      return ABIArgInfo::getInAlloca(/*FieldIndex=*/0);
    }
  }

  // Regcall uses the concept of a homogenous vector aggregate, similar
  // to other targets.
  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  if (State.CC == llvm::CallingConv::X86_RegCall &&
      isHomogeneousAggregate(Ty, Base, NumElts)) {

    if (State.FreeSSERegs >= NumElts) {
      State.FreeSSERegs -= NumElts;
      if (Ty->isBuiltinType() || Ty->isVectorType())
        return ABIArgInfo::getDirect();
      return ABIArgInfo::getExpand();
    }
    return getIndirectResult(Ty, /*ByVal=*/false, State);
  }

  if (isAggregateTypeForABI(Ty)) {
    // Structures with flexible arrays are always indirect.
    // FIXME: This should not be byval!
    if (RT && RT->getDecl()->hasFlexibleArrayMember())
      return getIndirectResult(Ty, true, State);

    // Ignore empty structs/unions on non-Windows.
    if (!IsWin32StructABI && isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();

    llvm::LLVMContext &LLVMContext = getVMContext();
    llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
    bool NeedsPadding = false;
    bool InReg;
    if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
      unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
      SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32);
      llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
      if (InReg)
        return ABIArgInfo::getDirectInReg(Result);
      else
        return ABIArgInfo::getDirect(Result);
    }
    llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;

    // Expand small (<= 128-bit) record types when we know that the stack
    // layout of those arguments will match the struct. This is important
    // because the LLVM backend isn't smart enough to remove byval, which
    // inhibits many optimizations.
    // Don't do this for the MCU if there are still free integer registers
    // (see X86_64 ABI for full explanation).
    if (getContext().getTypeSize(Ty) <= 4 * 32 &&
        (!IsMCUABI || State.FreeRegs == 0) && canExpandIndirectArgument(Ty))
      return ABIArgInfo::getExpandWithPadding(
          State.CC == llvm::CallingConv::X86_FastCall ||
              State.CC == llvm::CallingConv::X86_VectorCall ||
              State.CC == llvm::CallingConv::X86_RegCall,
          PaddingType);

    return getIndirectResult(Ty, true, State);
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // On Darwin, some vectors are passed in memory, we handle this by passing
    // it as an i8/i16/i32/i64.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(Ty);
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));
    }

    if (IsX86_MMXType(CGT.ConvertType(Ty)))
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64));

    return ABIArgInfo::getDirect();
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  bool InReg = shouldPrimitiveUseInReg(Ty, State);

  if (Ty->isPromotableIntegerType()) {
    if (InReg)
      return ABIArgInfo::getExtendInReg();
    return ABIArgInfo::getExtend();
  }

  if (InReg)
    return ABIArgInfo::getDirectInReg();
  return ABIArgInfo::getDirect();
}
1689 void X86_32ABIInfo::computeVectorCallArgs(CGFunctionInfo &FI, CCState &State,
1690 bool &UsedInAlloca) const {
1691 // Vectorcall x86 works subtly different than in x64, so the format is
1692 // a bit different than the x64 version. First, all vector types (not HVAs)
1693 // are assigned, with the first 6 ending up in the YMM0-5 or XMM0-5 registers.
1694 // This differs from the x64 implementation, where the first 6 by INDEX get
1696 // After that, integers AND HVAs are assigned Left to Right in the same pass.
1697 // Integers are passed as ECX/EDX if one is available (in order). HVAs will
1698 // first take up the remaining YMM/XMM registers. If insufficient registers
1699 // remain but an integer register (ECX/EDX) is available, it will be passed
1700 // in that, else, on the stack.
1701 for (auto &I : FI.arguments()) {
1702 // First pass do all the vector types.
1703 const Type *Base = nullptr;
1704 uint64_t NumElts = 0;
1705 const QualType& Ty = I.type;
1706 if ((Ty->isVectorType() || Ty->isBuiltinType()) &&
1707 isHomogeneousAggregate(Ty, Base, NumElts)) {
1708 if (State.FreeSSERegs >= NumElts) {
1709 State.FreeSSERegs -= NumElts;
1710 I.info = ABIArgInfo::getDirect();
1712 I.info = classifyArgumentType(Ty, State);
1714 UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca);
1718 for (auto &I : FI.arguments()) {
1719 // Second pass, do the rest!
1720 const Type *Base = nullptr;
1721 uint64_t NumElts = 0;
1722 const QualType& Ty = I.type;
1723 bool IsHva = isHomogeneousAggregate(Ty, Base, NumElts);
1725 if (IsHva && !Ty->isVectorType() && !Ty->isBuiltinType()) {
1726 // Assign true HVAs (non vector/native FP types).
1727 if (State.FreeSSERegs >= NumElts) {
1728 State.FreeSSERegs -= NumElts;
1729 I.info = getDirectX86Hva();
1730 } else {
1731 I.info = getIndirectResult(Ty, /*ByVal=*/false, State);
1732 }
1733 } else if (!IsHva) {
1734 // Assign all Non-HVAs, so this will exclude Vector/FP args.
1735 I.info = classifyArgumentType(Ty, State);
1736 UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca);
1737 }
1738 }
1739 }
1741 void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
1742 CCState State(FI.getCallingConvention());
1743 if (IsMCUABI)
1744 State.FreeRegs = 3;
1745 else if (State.CC == llvm::CallingConv::X86_FastCall)
1746 State.FreeRegs = 2;
1747 else if (State.CC == llvm::CallingConv::X86_VectorCall) {
1748 State.FreeRegs = 2;
1749 State.FreeSSERegs = 6;
1750 } else if (FI.getHasRegParm())
1751 State.FreeRegs = FI.getRegParm();
1752 else if (State.CC == llvm::CallingConv::X86_RegCall) {
1753 State.FreeRegs = 5;
1754 State.FreeSSERegs = 8;
1755 } else
1756 State.FreeRegs = DefaultNumRegisterParameters;
1758 if (!getCXXABI().classifyReturnType(FI)) {
1759 FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), State);
1760 } else if (FI.getReturnInfo().isIndirect()) {
1761 // The C++ ABI is not aware of register usage, so we have to check if the
1762 // return value was sret and put it in a register ourselves if appropriate.
1763 if (State.FreeRegs) {
1764 --State.FreeRegs; // The sret parameter consumes a register.
1765 if (!IsMCUABI)
1766 FI.getReturnInfo().setInReg(true);
1767 }
1768 }
1770 // The chain argument effectively gives us another free register.
1771 if (FI.isChainCall())
1772 ++State.FreeRegs;
1774 bool UsedInAlloca = false;
1775 if (State.CC == llvm::CallingConv::X86_VectorCall) {
1776 computeVectorCallArgs(FI, State, UsedInAlloca);
1777 } else {
1778 // If not vectorcall, revert to normal behavior.
1779 for (auto &I : FI.arguments()) {
1780 I.info = classifyArgumentType(I.type, State);
1781 UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca);
1782 }
1783 }
1785 // If we needed to use inalloca for any argument, do a second pass and rewrite
1786 // all the memory arguments to use inalloca.
1787 if (UsedInAlloca)
1788 rewriteWithInAlloca(FI);
1789 }
1791 void
1792 X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
1793 CharUnits &StackOffset, ABIArgInfo &Info,
1794 QualType Type) const {
1795 // Arguments are always 4-byte-aligned.
1796 CharUnits FieldAlign = CharUnits::fromQuantity(4);
1798 assert(StackOffset.isMultipleOf(FieldAlign) && "unaligned inalloca struct");
1799 Info = ABIArgInfo::getInAlloca(FrameFields.size());
1800 FrameFields.push_back(CGT.ConvertTypeForMem(Type));
1801 StackOffset += getContext().getTypeSizeInChars(Type);
1803 // Insert padding bytes to respect alignment.
1804 CharUnits FieldEnd = StackOffset;
1805 StackOffset = FieldEnd.alignTo(FieldAlign);
1806 if (StackOffset != FieldEnd) {
1807 CharUnits NumBytes = StackOffset - FieldEnd;
1808 llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
1809 Ty = llvm::ArrayType::get(Ty, NumBytes.getQuantity());
1810 FrameFields.push_back(Ty);
1811 }
1812 }
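// Worked example (illustrative, not part of the original source): a 6-byte
// field advances StackOffset to 6; alignTo(4) rounds that up to 8, so a
// [2 x i8] padding array is appended and the next inalloca field starts
// 4-byte aligned.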
1814 static bool isArgInAlloca(const ABIArgInfo &Info) {
1815 // Leave ignored and inreg arguments alone.
1816 switch (Info.getKind()) {
1817 case ABIArgInfo::InAlloca:
1818 return true;
1819 case ABIArgInfo::Indirect:
1820 assert(Info.getIndirectByVal());
1821 return true;
1822 case ABIArgInfo::Ignore:
1823 return false;
1824 case ABIArgInfo::Direct:
1825 case ABIArgInfo::Extend:
1826 if (Info.getInReg())
1827 return false;
1828 return true;
1829 case ABIArgInfo::Expand:
1830 case ABIArgInfo::CoerceAndExpand:
1831 // These are aggregate types which are never passed in registers when
1832 // inalloca is involved.
1833 return true;
1834 }
1835 llvm_unreachable("invalid enum");
1836 }
1838 void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
1839 assert(IsWin32StructABI && "inalloca only supported on win32");
1841 // Build a packed struct type for all of the arguments in memory.
1842 SmallVector<llvm::Type *, 6> FrameFields;
1844 // The stack alignment is always 4.
1845 CharUnits StackAlign = CharUnits::fromQuantity(4);
1847 CharUnits StackOffset;
1848 CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end();
1850 // Put 'this' into the struct before 'sret', if necessary.
1851 bool IsThisCall =
1852 FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall;
1853 ABIArgInfo &Ret = FI.getReturnInfo();
1854 if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall &&
1855 isArgInAlloca(I->info)) {
1856 addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
1857 ++I;
1858 }
1860 // Put the sret parameter into the inalloca struct if it's in memory.
1861 if (Ret.isIndirect() && !Ret.getInReg()) {
1862 CanQualType PtrTy = getContext().getPointerType(FI.getReturnType());
1863 addFieldToArgStruct(FrameFields, StackOffset, Ret, PtrTy);
1864 // On Windows, the hidden sret parameter is always returned in eax.
1865 Ret.setInAllocaSRet(IsWin32StructABI);
1866 }
1868 // Skip the 'this' parameter in ecx.
1869 if (IsThisCall)
1870 ++I;
1872 // Put arguments passed in memory into the struct.
1873 for (; I != E; ++I) {
1874 if (isArgInAlloca(I->info))
1875 addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
1876 }
1878 FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
1879 /*isPacked=*/true),
1880 StackAlign);
1881 }
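// Illustrative sketch (not from the original source): given a win32 type
//   struct S { S(const S &); int x; };
//   void f(S a, int b);
// 'a' classifies as InAlloca, which triggers this rewrite; both 'a' and the
// stack-bound 'b' are then packed into a single packed frame struct
// allocated with one inalloca at each call site.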
1883 Address X86_32ABIInfo::EmitVAArg(CodeGenFunction &CGF,
1884 Address VAListAddr, QualType Ty) const {
1886 auto TypeInfo = getContext().getTypeInfoInChars(Ty);
1888 // x86-32 changes the alignment of certain arguments on the stack.
1890 // Just messing with TypeInfo like this works because we never pass
1891 // anything indirectly.
1892 TypeInfo.second = CharUnits::fromQuantity(
1893 getTypeStackAlignInBytes(Ty, TypeInfo.second.getQuantity()));
1895 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
1896 TypeInfo, CharUnits::fromQuantity(4),
1897 /*AllowHigherAlign*/ true);
1898 }
1900 bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
1901 const llvm::Triple &Triple, const CodeGenOptions &Opts) {
1902 assert(Triple.getArch() == llvm::Triple::x86);
1904 switch (Opts.getStructReturnConvention()) {
1905 case CodeGenOptions::SRCK_Default:
1906 break;
1907 case CodeGenOptions::SRCK_OnStack: // -fpcc-struct-return
1908 return false;
1909 case CodeGenOptions::SRCK_InRegs: // -freg-struct-return
1910 return true;
1911 }
1913 if (Triple.isOSDarwin() || Triple.isOSIAMCU())
1914 return true;
1916 switch (Triple.getOS()) {
1917 case llvm::Triple::DragonFly:
1918 case llvm::Triple::FreeBSD:
1919 case llvm::Triple::OpenBSD:
1920 case llvm::Triple::Win32:
1921 return true;
1922 default:
1923 return false;
1924 }
1925 }
1927 void X86_32TargetCodeGenInfo::setTargetAttributes(
1928 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM,
1929 ForDefinition_t IsForDefinition) const {
1930 if (!IsForDefinition)
1931 return;
1932 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
1933 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
1934 // Get the LLVM function.
1935 llvm::Function *Fn = cast<llvm::Function>(GV);
1937 // Now add the 'alignstack' attribute with a value of 16.
1938 llvm::AttrBuilder B;
1939 B.addStackAlignmentAttr(16);
1940 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
1941 }
1942 if (FD->hasAttr<AnyX86InterruptAttr>()) {
1943 llvm::Function *Fn = cast<llvm::Function>(GV);
1944 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
1945 }
1946 }
1947 }
1949 bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
1950 CodeGen::CodeGenFunction &CGF,
1951 llvm::Value *Address) const {
1952 CodeGen::CGBuilderTy &Builder = CGF.Builder;
1954 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
1956 // 0-7 are the eight integer registers; the order is different
1957 // on Darwin (for EH), but the range is the same.
1958 // 8 is %eip.
1959 AssignToArrayRange(Builder, Address, Four8, 0, 8);
1961 if (CGF.CGM.getTarget().getTriple().isOSDarwin()) {
1962 // 12-16 are st(0..4). Not sure why we stop at 4.
1963 // These have size 16, which is sizeof(long double) on
1964 // platforms with 8-byte alignment for that type.
1965 llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
1966 AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);
1967 } else {
1969 // 9 is %eflags, which doesn't get a size on Darwin for some
1970 // reason.
1971 Builder.CreateAlignedStore(
1972 Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9),
1973 CharUnits::One());
1975 // 11-16 are st(0..5). Not sure why we stop at 5.
1976 // These have size 12, which is sizeof(long double) on
1977 // platforms with 4-byte alignment for that type.
1978 llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
1979 AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
1980 }
1982 return false;
1983 }
1985 //===----------------------------------------------------------------------===//
1986 // X86-64 ABI Implementation
1987 //===----------------------------------------------------------------------===//
1990 namespace {
1991 /// The AVX ABI level for X86 targets.
1992 enum class X86AVXABILevel {
1993 None,
1994 AVX,
1995 AVX512
1996 };
1998 /// \p returns the size in bits of the largest (native) vector for \p AVXLevel.
1999 static unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) {
2000 switch (AVXLevel) {
2001 case X86AVXABILevel::AVX512:
2002 return 512;
2003 case X86AVXABILevel::AVX:
2004 return 256;
2005 case X86AVXABILevel::None:
2006 return 128;
2007 }
2008 llvm_unreachable("Unknown AVXLevel");
2009 }
2011 /// X86_64ABIInfo - The X86_64 ABI information.
2012 class X86_64ABIInfo : public SwiftABIInfo {
2013 enum Class {
2014 Integer = 0,
2015 SSE,
2016 SSEUp,
2017 X87,
2018 X87Up,
2019 ComplexX87,
2020 NoClass,
2021 Memory
2022 };
2024 /// merge - Implement the X86_64 ABI merging algorithm.
2026 /// Merge an accumulating classification \arg Accum with a field
2027 /// classification \arg Field.
2029 /// \param Accum - The accumulating classification. This should
2030 /// always be either NoClass or the result of a previous merge
2031 /// call. In addition, this should never be Memory (the caller
2032 /// should just return Memory for the aggregate).
2033 static Class merge(Class Accum, Class Field);
2035 /// postMerge - Implement the X86_64 ABI post merging algorithm.
2037 /// Post merger cleanup, reduces a malformed Hi and Lo pair to
2038 /// final MEMORY or SSE classes when necessary.
2040 /// \param AggregateSize - The size of the current aggregate in
2041 /// the classification process.
2043 /// \param Lo - The classification for the parts of the type
2044 /// residing in the low word of the containing object.
2046 /// \param Hi - The classification for the parts of the type
2047 /// residing in the higher words of the containing object.
2049 void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;
2051 /// classify - Determine the x86_64 register classes in which the
2052 /// given type T should be passed.
2054 /// \param Lo - The classification for the parts of the type
2055 /// residing in the low word of the containing object.
2057 /// \param Hi - The classification for the parts of the type
2058 /// residing in the high word of the containing object.
2060 /// \param OffsetBase - The bit offset of this type in the
2061 /// containing object. Some parameters are classified different
2062 /// depending on whether they straddle an eightbyte boundary.
2064 /// \param isNamedArg - Whether the argument in question is a "named"
2065 /// argument, as used in AMD64-ABI 3.5.7.
2067 /// If a word is unused its result will be NoClass; if a type should
2068 /// be passed in Memory then at least the classification of \arg Lo
2069 /// will be Memory.
2071 /// The \arg Lo class will be NoClass iff the argument is ignored.
2073 /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
2074 /// also be ComplexX87.
2075 void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
2076 bool isNamedArg) const;
2078 llvm::Type *GetByteVectorType(QualType Ty) const;
2079 llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
2080 unsigned IROffset, QualType SourceTy,
2081 unsigned SourceOffset) const;
2082 llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
2083 unsigned IROffset, QualType SourceTy,
2084 unsigned SourceOffset) const;
2086 /// getIndirectReturnResult - Given a source type \arg Ty, return a suitable
2087 /// result such that the value will be returned in memory.
2088 ABIArgInfo getIndirectReturnResult(QualType Ty) const;
2090 /// getIndirectResult - Given a source type \arg Ty, return a suitable result
2091 /// such that the argument will be passed in memory.
2093 /// \param freeIntRegs - The number of free integer registers remaining
2094 /// available.
2095 ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;
2097 ABIArgInfo classifyReturnType(QualType RetTy) const;
2099 ABIArgInfo classifyArgumentType(QualType Ty, unsigned freeIntRegs,
2100 unsigned &neededInt, unsigned &neededSSE,
2101 bool isNamedArg) const;
2103 ABIArgInfo classifyRegCallStructType(QualType Ty, unsigned &NeededInt,
2104 unsigned &NeededSSE) const;
2106 ABIArgInfo classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
2107 unsigned &NeededSSE) const;
2109 bool IsIllegalVectorType(QualType Ty) const;
2111 /// The 0.98 ABI revision clarified a lot of ambiguities,
2112 /// unfortunately in ways that were not always consistent with
2113 /// certain previous compilers. In particular, platforms which
2114 /// required strict binary compatibility with older versions of GCC
2115 /// may need to exempt themselves.
2116 bool honorsRevision0_98() const {
2117 return !getTarget().getTriple().isOSDarwin();
2118 }
2120 /// GCC classifies <1 x long long> as SSE but some platform ABIs choose to
2121 /// classify it as INTEGER (for compatibility with older clang compilers).
2122 bool classifyIntegerMMXAsSSE() const {
2123 // Clang <= 3.8 did not do this.
2124 if (getCodeGenOpts().getClangABICompat() <=
2125 CodeGenOptions::ClangABI::Ver3_8)
2126 return false;
2128 const llvm::Triple &Triple = getTarget().getTriple();
2129 if (Triple.isOSDarwin() || Triple.getOS() == llvm::Triple::PS4)
2130 return false;
2131 if (Triple.isOSFreeBSD() && Triple.getOSMajorVersion() >= 10)
2132 return false;
2133 return true;
2134 }
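// Illustrative consequence (not part of the original source): with SSE
// classification a named <1 x long long> argument travels in %xmm0; under
// the older Integer behavior (Darwin, PS4, FreeBSD < 10, or
// -fclang-abi-compat=3.8) it travels in %rdi instead.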
2136 X86AVXABILevel AVXLevel;
2137 // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on
2138 // 64-bit hardware.
2139 bool Has64BitPointers;
2141 public:
2142 X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel) :
2143 SwiftABIInfo(CGT), AVXLevel(AVXLevel),
2144 Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
2145 }
2147 bool isPassedUsingAVXType(QualType type) const {
2148 unsigned neededInt, neededSSE;
2149 // The freeIntRegs argument doesn't matter here.
2150 ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE,
2151 /*isNamedArg*/true);
2152 if (info.isDirect()) {
2153 llvm::Type *ty = info.getCoerceToType();
2154 if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
2155 return (vectorTy->getBitWidth() > 128);
2156 }
2157 return false;
2158 }
2160 void computeInfo(CGFunctionInfo &FI) const override;
2162 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
2163 QualType Ty) const override;
2164 Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
2165 QualType Ty) const override;
2167 bool has64BitPointers() const {
2168 return Has64BitPointers;
2169 }
2171 bool shouldPassIndirectlyForSwift(CharUnits totalSize,
2172 ArrayRef<llvm::Type*> scalars,
2173 bool asReturnValue) const override {
2174 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
2175 }
2176 bool isSwiftErrorInRegister() const override {
2177 return true;
2178 }
2179 };
2181 /// WinX86_64ABIInfo - The Windows X86_64 ABI information.
2182 class WinX86_64ABIInfo : public SwiftABIInfo {
2183 public:
2184 WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT)
2185 : SwiftABIInfo(CGT),
2186 IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {}
2188 void computeInfo(CGFunctionInfo &FI) const override;
2190 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
2191 QualType Ty) const override;
2193 bool isHomogeneousAggregateBaseType(QualType Ty) const override {
2194 // FIXME: Assumes vectorcall is in use.
2195 return isX86VectorTypeForVectorCall(getContext(), Ty);
2196 }
2198 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
2199 uint64_t NumMembers) const override {
2200 // FIXME: Assumes vectorcall is in use.
2201 return isX86VectorCallAggregateSmallEnough(NumMembers);
2202 }
2204 bool shouldPassIndirectlyForSwift(CharUnits totalSize,
2205 ArrayRef<llvm::Type *> scalars,
2206 bool asReturnValue) const override {
2207 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
2208 }
2210 bool isSwiftErrorInRegister() const override {
2211 return true;
2212 }
2214 private:
2215 ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs, bool IsReturnType,
2216 bool IsVectorCall, bool IsRegCall) const;
2217 ABIArgInfo reclassifyHvaArgType(QualType Ty, unsigned &FreeSSERegs,
2218 const ABIArgInfo &current) const;
2219 void computeVectorCallArgs(CGFunctionInfo &FI, unsigned FreeSSERegs,
2220 bool IsVectorCall, bool IsRegCall) const;
2222 bool IsMingw64;
2223 };
2225 class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
2226 public:
2227 X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
2228 : TargetCodeGenInfo(new X86_64ABIInfo(CGT, AVXLevel)) {}
2230 const X86_64ABIInfo &getABIInfo() const {
2231 return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
2232 }
2234 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
2235 return 7;
2236 }
2238 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2239 llvm::Value *Address) const override {
2240 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
2242 // 0-15 are the 16 integer registers.
2243 // 16 is %rip.
2244 AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
2245 return false;
2246 }
2248 llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
2249 StringRef Constraint,
2250 llvm::Type* Ty) const override {
2251 return X86AdjustInlineAsmType(CGF, Constraint, Ty);
2252 }
2254 bool isNoProtoCallVariadic(const CallArgList &args,
2255 const FunctionNoProtoType *fnType) const override {
2256 // The default CC on x86-64 sets %al to the number of SSE
2257 // registers used, and GCC sets this when calling an unprototyped
2258 // function, so we override the default behavior. However, don't do
2259 // that when AVX types are involved: the ABI explicitly states it is
2260 // undefined, and it doesn't work in practice because of how the ABI
2261 // defines varargs anyway.
2262 if (fnType->getCallConv() == CC_C) {
2263 bool HasAVXType = false;
2264 for (CallArgList::const_iterator
2265 it = args.begin(), ie = args.end(); it != ie; ++it) {
2266 if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
2267 HasAVXType = true;
2268 break;
2269 }
2270 }
2272 if (!HasAVXType)
2273 return true;
2274 }
2276 return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
2277 }
2279 llvm::Constant *
2280 getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
2281 unsigned Sig = (0xeb << 0) | // jmp rel8
2282 (0x06 << 8) | // .+0x08
2283 ('v' << 16) |
2284 ('2' << 24);
2285 return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
2286 }
2288 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2289 CodeGen::CodeGenModule &CGM,
2290 ForDefinition_t IsForDefinition) const override {
2291 if (!IsForDefinition)
2292 return;
2293 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2294 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
2295 // Get the LLVM function.
2296 auto *Fn = cast<llvm::Function>(GV);
2298 // Now add the 'alignstack' attribute with a value of 16.
2299 llvm::AttrBuilder B;
2300 B.addStackAlignmentAttr(16);
2301 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
2302 }
2303 if (FD->hasAttr<AnyX86InterruptAttr>()) {
2304 llvm::Function *Fn = cast<llvm::Function>(GV);
2305 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
2306 }
2307 }
2308 }
2309 };
2311 class PS4TargetCodeGenInfo : public X86_64TargetCodeGenInfo {
2312 public:
2313 PS4TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
2314 : X86_64TargetCodeGenInfo(CGT, AVXLevel) {}
2316 void getDependentLibraryOption(llvm::StringRef Lib,
2317 llvm::SmallString<24> &Opt) const override {
2318 Opt = "\01";
2319 // If the argument contains a space, enclose it in quotes.
2320 if (Lib.find(" ") != StringRef::npos)
2321 Opt += "\"" + Lib.str() + "\"";
2322 else
2323 Opt += Lib;
2324 }
2325 };
2327 static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
2328 // If the argument does not end in .lib, automatically add the suffix.
2329 // If the argument contains a space, enclose it in quotes.
2330 // This matches the behavior of MSVC.
2331 bool Quote = (Lib.find(" ") != StringRef::npos);
2332 std::string ArgStr = Quote ? "\"" : "";
2333 ArgStr += Lib;
2334 if (!Lib.endswith_lower(".lib"))
2335 ArgStr += ".lib";
2336 ArgStr += Quote ? "\"" : "";
2337 return ArgStr;
2338 }
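// Worked examples (illustrative, not part of the original source):
// qualifyWindowsLibrary("msvcrt") yields "msvcrt.lib";
// qualifyWindowsLibrary("my stuff.lib") contains a space and is quoted,
// yielding "\"my stuff.lib\"".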
2340 class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo {
2341 public:
2342 WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
2343 bool DarwinVectorABI, bool RetSmallStructInRegABI, bool Win32StructABI,
2344 unsigned NumRegisterParameters)
2345 : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI,
2346 Win32StructABI, NumRegisterParameters, false) {}
2348 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2349 CodeGen::CodeGenModule &CGM,
2350 ForDefinition_t IsForDefinition) const override;
2352 void getDependentLibraryOption(llvm::StringRef Lib,
2353 llvm::SmallString<24> &Opt) const override {
2354 Opt = "/DEFAULTLIB:";
2355 Opt += qualifyWindowsLibrary(Lib);
2356 }
2358 void getDetectMismatchOption(llvm::StringRef Name,
2359 llvm::StringRef Value,
2360 llvm::SmallString<32> &Opt) const override {
2361 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
2362 }
2363 };
2365 static void addStackProbeSizeTargetAttribute(const Decl *D,
2366 llvm::GlobalValue *GV,
2367 CodeGen::CodeGenModule &CGM) {
2368 if (D && isa<FunctionDecl>(D)) {
2369 if (CGM.getCodeGenOpts().StackProbeSize != 4096) {
2370 llvm::Function *Fn = cast<llvm::Function>(GV);
2372 Fn->addFnAttr("stack-probe-size",
2373 llvm::utostr(CGM.getCodeGenOpts().StackProbeSize));
2374 }
2375 }
2376 }
2378 void WinX86_32TargetCodeGenInfo::setTargetAttributes(
2379 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM,
2380 ForDefinition_t IsForDefinition) const {
2381 X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM, IsForDefinition);
2382 if (!IsForDefinition)
2383 return;
2384 addStackProbeSizeTargetAttribute(D, GV, CGM);
2385 }
2387 class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
2388 public:
2389 WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
2390 X86AVXABILevel AVXLevel)
2391 : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {}
2393 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2394 CodeGen::CodeGenModule &CGM,
2395 ForDefinition_t IsForDefinition) const override;
2397 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
2398 return 7;
2399 }
2401 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2402 llvm::Value *Address) const override {
2403 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
2405 // 0-15 are the 16 integer registers.
2406 // 16 is %rip.
2407 AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
2408 return false;
2409 }
2411 void getDependentLibraryOption(llvm::StringRef Lib,
2412 llvm::SmallString<24> &Opt) const override {
2413 Opt = "/DEFAULTLIB:";
2414 Opt += qualifyWindowsLibrary(Lib);
2415 }
2417 void getDetectMismatchOption(llvm::StringRef Name,
2418 llvm::StringRef Value,
2419 llvm::SmallString<32> &Opt) const override {
2420 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
2421 }
2422 };
2423 } // namespace
2424 void WinX86_64TargetCodeGenInfo::setTargetAttributes(
2425 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM,
2426 ForDefinition_t IsForDefinition) const {
2427 TargetCodeGenInfo::setTargetAttributes(D, GV, CGM, IsForDefinition);
2428 if (!IsForDefinition)
2429 return;
2430 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
2431 if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
2432 // Get the LLVM function.
2433 auto *Fn = cast<llvm::Function>(GV);
2435 // Now add the 'alignstack' attribute with a value of 16.
2436 llvm::AttrBuilder B;
2437 B.addStackAlignmentAttr(16);
2438 Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
2439 }
2440 if (FD->hasAttr<AnyX86InterruptAttr>()) {
2441 llvm::Function *Fn = cast<llvm::Function>(GV);
2442 Fn->setCallingConv(llvm::CallingConv::X86_INTR);
2443 }
2444 }
2446 addStackProbeSizeTargetAttribute(D, GV, CGM);
2447 }
2450 void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
2451 Class &Hi) const {
2452 // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
2454 // (a) If one of the classes is Memory, the whole argument is passed in
2455 // memory.
2457 // (b) If X87UP is not preceded by X87, the whole argument is passed in
2458 // memory.
2460 // (c) If the size of the aggregate exceeds two eightbytes and the first
2461 // eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
2462 // argument is passed in memory. NOTE: This is necessary to keep the
2463 // ABI working for processors that don't support the __m256 type.
2465 // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
2467 // Some of these are enforced by the merging logic. Others can arise
2468 // only with unions; for example:
2469 // union { _Complex double; unsigned; }
2471 // Note that clauses (b) and (c) were added in 0.98.
2473 if (Lo == Memory || Hi == Memory)
2474 Lo = Memory;
2475 if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
2476 Lo = Memory;
2477 if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
2478 Lo = Memory;
2479 if (Hi == SSEUp && Lo != SSE)
2480 Hi = SSE;
2481 }
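// Worked example (illustrative, not part of the original source): for
//   union U { __m256 v; int i; };   // 32 bytes
// the members merge to Lo = Integer, Hi = SSEUp, so rule (c) above forces
// the union into memory, while a plain __m256 (Lo = SSE, Hi = SSEUp) stays
// in a single YMM register.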
2483 X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
2484 // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
2485 // classified recursively so that always two fields are
2486 // considered. The resulting class is calculated according to
2487 // the classes of the fields in the eightbyte:
2489 // (a) If both classes are equal, this is the resulting class.
2491 // (b) If one of the classes is NO_CLASS, the resulting class is
2492 // the other class.
2494 // (c) If one of the classes is MEMORY, the result is the MEMORY
2495 // class.
2497 // (d) If one of the classes is INTEGER, the result is the
2498 // INTEGER.
2500 // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
2501 // MEMORY is used as class.
2503 // (f) Otherwise class SSE is used.
2505 // Accum should never be memory (we should have returned) or
2506 // ComplexX87 (because this cannot be passed in a structure).
2507 assert((Accum != Memory && Accum != ComplexX87) &&
2508 "Invalid accumulated classification during merge.");
2509 if (Accum == Field || Field == NoClass)
2510 return Accum;
2511 if (Field == Memory)
2512 return Memory;
2513 if (Accum == NoClass)
2514 return Field;
2515 if (Accum == Integer || Field == Integer)
2516 return Integer;
2517 if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
2518 Accum == X87 || Accum == X87Up)
2519 return Memory;
2520 return SSE;
2521 }
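// Worked example (illustrative, not part of the original source): for
//   struct S { int i; float f; };
// both fields share the low eightbyte, so merge(Integer, SSE) applies rule
// (d) and yields Integer; S is passed in a single GPR.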
2523 void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
2524 Class &Lo, Class &Hi, bool isNamedArg) const {
2525 // FIXME: This code can be simplified by introducing a simple value class for
2526 // Class pairs with appropriate constructor methods for the various
2527 // situations.
2529 // FIXME: Some of the split computations are wrong; unaligned vectors
2530 // shouldn't be passed in registers for example, so there is no chance they
2531 // can straddle an eightbyte. Verify & simplify.
2533 Lo = Hi = NoClass;
2535 Class &Current = OffsetBase < 64 ? Lo : Hi;
2536 Current = Memory;
2538 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
2539 BuiltinType::Kind k = BT->getKind();
2541 if (k == BuiltinType::Void) {
2542 Current = NoClass;
2543 } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
2544 Lo = Integer;
2545 Hi = Integer;
2546 } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
2547 Current = Integer;
2548 } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
2549 Current = SSE;
2550 } else if (k == BuiltinType::LongDouble) {
2551 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
2552 if (LDF == &llvm::APFloat::IEEEquad()) {
2553 Lo = SSE;
2554 Hi = SSEUp;
2555 } else if (LDF == &llvm::APFloat::x87DoubleExtended()) {
2556 Lo = X87;
2557 Hi = X87Up;
2558 } else if (LDF == &llvm::APFloat::IEEEdouble()) {
2559 Current = SSE;
2560 } else
2561 llvm_unreachable("unexpected long double representation!");
2562 }
2563 // FIXME: _Decimal32 and _Decimal64 are SSE.
2564 // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
2565 return;
2566 }
2568 if (const EnumType *ET = Ty->getAs<EnumType>()) {
2569 // Classify the underlying integer type.
2570 classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
2571 return;
2572 }
2574 if (Ty->hasPointerRepresentation()) {
2575 Current = Integer;
2576 return;
2577 }
2579 if (Ty->isMemberPointerType()) {
2580 if (Ty->isMemberFunctionPointerType()) {
2581 if (Has64BitPointers) {
2582 // If Has64BitPointers, this is an {i64, i64}, so classify both
2583 // Lo and Hi now.
2584 Lo = Hi = Integer;
2585 } else {
2586 // Otherwise, with 32-bit pointers, this is an {i32, i32}. If that
2587 // straddles an eightbyte boundary, Hi should be classified as well.
2588 uint64_t EB_FuncPtr = (OffsetBase) / 64;
2589 uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;
2590 if (EB_FuncPtr != EB_ThisAdj) {
2591 Lo = Hi = Integer;
2592 } else {
2593 Current = Integer;
2594 }
2595 }
2596 } else {
2597 Current = Integer;
2598 }
2599 return;
2600 }
2602 if (const VectorType *VT = Ty->getAs<VectorType>()) {
2603 uint64_t Size = getContext().getTypeSize(VT);
2604 if (Size == 1 || Size == 8 || Size == 16 || Size == 32) {
2605 // gcc passes the following as integer:
2606 // 4 bytes - <4 x char>, <2 x short>, <1 x int>, <1 x float>
2607 // 2 bytes - <2 x char>, <1 x short>
2608 // 1 byte - <1 x char>
2609 Current = Integer;
2611 // If this type crosses an eightbyte boundary, it should be
2612 // split.
2613 uint64_t EB_Lo = (OffsetBase) / 64;
2614 uint64_t EB_Hi = (OffsetBase + Size - 1) / 64;
2615 if (EB_Lo != EB_Hi)
2616 Hi = Lo;
2617 } else if (Size == 64) {
2618 QualType ElementType = VT->getElementType();
2620 // gcc passes <1 x double> in memory. :(
2621 if (ElementType->isSpecificBuiltinType(BuiltinType::Double))
2622 return;
2624 // gcc passes <1 x long long> as SSE but clang used to unconditionally
2625 // pass them as integer. For platforms where clang is the de facto
2626 // platform compiler, we must continue to use integer.
2627 if (!classifyIntegerMMXAsSSE() &&
2628 (ElementType->isSpecificBuiltinType(BuiltinType::LongLong) ||
2629 ElementType->isSpecificBuiltinType(BuiltinType::ULongLong) ||
2630 ElementType->isSpecificBuiltinType(BuiltinType::Long) ||
2631 ElementType->isSpecificBuiltinType(BuiltinType::ULong)))
2632 Current = Integer;
2633 else
2634 Current = SSE;
2636 // If this type crosses an eightbyte boundary, it should be
2637 // split.
2638 if (OffsetBase && OffsetBase != 64)
2639 Hi = Lo;
2640 } else if (Size == 128 ||
2641 (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) {
2642 // Arguments of 256-bits are split into four eightbyte chunks. The
2643 // least significant one belongs to class SSE and all the others to class
2644 // SSEUP. The original Lo and Hi design considers that types can't be
2645 // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense.
2646 // This design isn't correct for 256-bits, but since there're no cases
2647 // where the upper parts would need to be inspected, avoid adding
2648 // complexity and just consider Hi to match the 64-256 part.
2650 // Note that per 3.5.7 of AMD64-ABI, 256-bit args are only passed in
2651 // registers if they are "named", i.e. not part of the "..." of a
2652 // variadic function.
2654 // Similarly, per 3.2.3. of the AVX512 draft, 512-bits ("named") args are
2655 // split into eight eightbyte chunks, one SSE and seven SSEUP.
2656 Lo = SSE;
2657 Hi = SSEUp;
2658 }
2659 return;
2660 }
2662 if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
2663 QualType ET = getContext().getCanonicalType(CT->getElementType());
2665 uint64_t Size = getContext().getTypeSize(Ty);
2666 if (ET->isIntegralOrEnumerationType()) {
2667 if (Size <= 64)
2668 Current = Integer;
2669 else if (Size <= 128)
2670 Lo = Hi = Integer;
2671 } else if (ET == getContext().FloatTy) {
2672 Current = SSE;
2673 } else if (ET == getContext().DoubleTy) {
2674 Lo = Hi = SSE;
2675 } else if (ET == getContext().LongDoubleTy) {
2676 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
2677 if (LDF == &llvm::APFloat::IEEEquad())
2678 Current = Memory;
2679 else if (LDF == &llvm::APFloat::x87DoubleExtended())
2680 Current = ComplexX87;
2681 else if (LDF == &llvm::APFloat::IEEEdouble())
2682 Lo = Hi = SSE;
2683 else
2684 llvm_unreachable("unexpected long double representation!");
2685 }
2687 // If this complex type crosses an eightbyte boundary then it
2688 // should be split.
2689 uint64_t EB_Real = (OffsetBase) / 64;
2690 uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
2691 if (Hi == NoClass && EB_Real != EB_Imag)
2692 Hi = Lo;
2694 return;
2695 }
2697 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
2698 // Arrays are treated like structures.
2700 uint64_t Size = getContext().getTypeSize(Ty);
2702 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
2703 // than eight eightbytes, ..., it has class MEMORY.
2704 if (Size > 512)
2705 return;
2707 // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
2708 // fields, it has class MEMORY.
2710 // Only need to check alignment of array base.
2711 if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
2712 return;
2714 // Otherwise implement simplified merge. We could be smarter about
2715 // this, but it isn't worth it and would be harder to verify.
2716 Current = NoClass;
2717 uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
2718 uint64_t ArraySize = AT->getSize().getZExtValue();
2720 // The only case a 256-bit wide vector could be used is when the array
2721 // contains a single 256-bit element. Since Lo and Hi logic isn't extended
2722 // to work for sizes wider than 128, early check and fallback to memory.
2724 if (Size > 128 &&
2725 (Size != EltSize || Size > getNativeVectorSizeForAVXABI(AVXLevel)))
2726 return;
2728 for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
2729 Class FieldLo, FieldHi;
2730 classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);
2731 Lo = merge(Lo, FieldLo);
2732 Hi = merge(Hi, FieldHi);
2733 if (Lo == Memory || Hi == Memory)
2734 break;
2735 }
2737 postMerge(Size, Lo, Hi);
2738 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
2739 return;
2740 }
2742 if (const RecordType *RT = Ty->getAs<RecordType>()) {
2743 uint64_t Size = getContext().getTypeSize(Ty);
2745 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
2746 // than eight eightbytes, ..., it has class MEMORY.
2747 if (Size > 512)
2748 return;
2750 // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
2751 // copy constructor or a non-trivial destructor, it is passed by invisible
2752 // reference.
2753 if (getRecordArgABI(RT, getCXXABI()))
2754 return;
2756 const RecordDecl *RD = RT->getDecl();
2758 // Assume variable sized types are passed in memory.
2759 if (RD->hasFlexibleArrayMember())
2760 return;
2762 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
2764 // Reset Lo class, this will be recomputed.
2765 Current = NoClass;
2767 // If this is a C++ record, classify the bases first.
2768 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
2769 for (const auto &I : CXXRD->bases()) {
2770 assert(!I.isVirtual() && !I.getType()->isDependentType() &&
2771 "Unexpected base class!");
2772 const CXXRecordDecl *Base =
2773 cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
2775 // Classify this field.
2777 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
2778 // single eightbyte, each is classified separately. Each eightbyte gets
2779 // initialized to class NO_CLASS.
2780 Class FieldLo, FieldHi;
2781 uint64_t Offset =
2782 OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
2783 classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg);
2784 Lo = merge(Lo, FieldLo);
2785 Hi = merge(Hi, FieldHi);
2786 if (Lo == Memory || Hi == Memory) {
2787 postMerge(Size, Lo, Hi);
2788 return;
2789 }
2790 }
2791 }
2793 // Classify the fields one at a time, merging the results.
2794 unsigned idx = 0;
2795 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
2796 i != e; ++i, ++idx) {
2797 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
2798 bool BitField = i->isBitField();
2800 // Ignore padding bit-fields.
2801 if (BitField && i->isUnnamedBitfield())
2802 continue;
2804 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
2805 // eight eightbytes, or it contains unaligned fields, it has class MEMORY.
2807 // The only case a 256-bit wide vector could be used is when the struct
2808 // contains a single 256-bit element. Since Lo and Hi logic isn't extended
2809 // to work for sizes wider than 128, early check and fallback to memory.
2811 if (Size > 128 && (Size != getContext().getTypeSize(i->getType()) ||
2812 Size > getNativeVectorSizeForAVXABI(AVXLevel))) {
2813 Lo = Memory;
2814 postMerge(Size, Lo, Hi);
2815 return;
2816 }
2817 // Note, skip this test for bit-fields, see below.
2818 if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
2819 Lo = Memory;
2820 postMerge(Size, Lo, Hi);
2821 return;
2822 }
2824 // Classify this field.
2826 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
2827 // exceeds a single eightbyte, each is classified
2828 // separately. Each eightbyte gets initialized to class
2829 // NO_CLASS.
2830 Class FieldLo, FieldHi;
2832 // Bit-fields require special handling, they do not force the
2833 // structure to be passed in memory even if unaligned, and
2834 // therefore they can straddle an eightbyte.
2835 if (BitField) {
2836 assert(!i->isUnnamedBitfield());
2837 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
2838 uint64_t Size = i->getBitWidthValue(getContext());
2840 uint64_t EB_Lo = Offset / 64;
2841 uint64_t EB_Hi = (Offset + Size - 1) / 64;
2843 if (EB_Lo) {
2844 assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
2845 FieldLo = NoClass;
2846 FieldHi = Integer;
2847 } else {
2848 FieldLo = Integer;
2849 FieldHi = EB_Hi ? Integer : NoClass;
2850 }
2851 } else
2852 classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
2853 Lo = merge(Lo, FieldLo);
2854 Hi = merge(Hi, FieldHi);
2855 if (Lo == Memory || Hi == Memory)
2856 break;
2857 }
2859 postMerge(Size, Lo, Hi);
2860 }
2861 }
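// End-to-end example (illustrative, not part of the original source):
//   struct P { char *p; double d; };   // 16 bytes
// classifies as Lo = Integer (the pointer eightbyte) and Hi = SSE (the
// double eightbyte), so P is passed in one GPR plus one XMM register.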
2863 ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
2864 // If this is a scalar LLVM value then assume LLVM will pass it in the right
2865 // place naturally.
2866 if (!isAggregateTypeForABI(Ty)) {
2867 // Treat an enum type as its underlying type.
2868 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2869 Ty = EnumTy->getDecl()->getIntegerType();
2871 return (Ty->isPromotableIntegerType() ?
2872 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
2873 }
2875 return getNaturalAlignIndirect(Ty);
2876 }
2878 bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
2879 if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
2880 uint64_t Size = getContext().getTypeSize(VecTy);
2881 unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel);
2882 if (Size <= 64 || Size > LargestVector)
2883 return true;
2884 }
2886 return false;
2887 }
2889 ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
2890 unsigned freeIntRegs) const {
2891 // If this is a scalar LLVM value then assume LLVM will pass it in the right
2892 // place naturally.
2893 //
2894 // This assumption is optimistic, as there could be free registers available
2895 // when we need to pass this argument in memory, and LLVM could try to pass
2896 // the argument in the free register. This does not seem to happen currently,
2897 // but this code would be much safer if we could mark the argument with
2898 // 'onstack'. See PR12193.
2899 if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) {
2900 // Treat an enum type as its underlying type.
2901 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2902 Ty = EnumTy->getDecl()->getIntegerType();
2904 return (Ty->isPromotableIntegerType() ?
2905 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
2906 }
2908 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
2909 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
2911 // Compute the byval alignment. We specify the alignment of the byval in all
2912 // cases so that the mid-level optimizer knows the alignment of the byval.
2913 unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);
2915 // Attempt to avoid passing indirect results using byval when possible. This
2916 // is important for good codegen.
2918 // We do this by coercing the value into a scalar type which the backend can
2919 // handle naturally (i.e., without using byval).
2921 // For simplicity, we currently only do this when we have exhausted all of the
2922 // free integer registers. Doing this when there are free integer registers
2923 // would require more care, as we would have to ensure that the coerced value
2924 // did not claim the unused register. That would require either reordering the
2925 // arguments to the function (so that any subsequent inreg values came first),
2926 // or only doing this optimization when there were no following arguments that
2927 // might be passed in the free register.
2929 // We currently expect it to be rare (particularly in well written code) for
2930 // arguments to be passed on the stack when there are still free integer
2931 // registers available (this would typically imply large structs being passed
2932 // by value), so this seems like a fair tradeoff for now.
2934 // We can revisit this if the backend grows support for 'onstack' parameter
2935 // attributes. See PR12193.
2936 if (freeIntRegs == 0) {
2937 uint64_t Size = getContext().getTypeSize(Ty);
2939 // If this type fits in an eightbyte, coerce it into the matching integral
2940 // type, which will end up on the stack (with alignment 8).
2941 if (Align == 8 && Size <= 64)
2942 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
2943 Size));
2944 }
2946 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(Align));
2947 }
2949 /// The ABI specifies that a value should be passed in a full vector XMM/YMM
2950 /// register. Pick an LLVM IR type that will be passed as a vector register.
2951 llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
2952 // Wrapper structs/arrays that only contain vectors are passed just like
2953 // vectors; strip them off if present.
2954 if (const Type *InnerTy = isSingleElementStruct(Ty, getContext()))
2955 Ty = QualType(InnerTy, 0);
2957 llvm::Type *IRType = CGT.ConvertType(Ty);
2958 if (isa<llvm::VectorType>(IRType) ||
2959 IRType->getTypeID() == llvm::Type::FP128TyID)
2960 return IRType;
2962 // We couldn't find the preferred IR vector type for 'Ty'.
2963 uint64_t Size = getContext().getTypeSize(Ty);
2964 assert((Size == 128 || Size == 256 || Size == 512) && "Invalid type found!");
2966 // Return a LLVM IR vector type based on the size of 'Ty'.
2967 return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()),
2968 Size / 64);
2969 }
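// Illustrative example (not part of the original source): a 256-bit
// single-element struct whose converted IR type is not already a vector
// comes back from the fallback path above as <4 x double>, one YMM-sized
// LLVM vector.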
2971 /// BitsContainNoUserData - Return true if the specified [start,end) bit range
2972 /// is known to either be off the end of the specified type or being in
2973 /// alignment padding. The user type specified is known to be at most 128 bits
2974 /// in size, and have passed through X86_64ABIInfo::classify with a successful
2975 /// classification that put one of the two halves in the INTEGER class.
2977 /// It is conservatively correct to return false.
2978 static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
2979 unsigned EndBit, ASTContext &Context) {
2980 // If the bytes being queried are off the end of the type, there is no user
2981 // data hiding here. This handles analysis of builtins, vectors and other
2982 // types that don't contain interesting padding.
2983 unsigned TySize = (unsigned)Context.getTypeSize(Ty);
2984 if (TySize <= StartBit)
2985 return true;
2987 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
2988 unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
2989 unsigned NumElts = (unsigned)AT->getSize().getZExtValue();
2991 // Check each element to see if the element overlaps with the queried range.
2992 for (unsigned i = 0; i != NumElts; ++i) {
2993 // If the element is after the span we care about, then we're done..
2994 unsigned EltOffset = i*EltSize;
2995 if (EltOffset >= EndBit) break;
2997 unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0;
2998 if (!BitsContainNoUserData(AT->getElementType(), EltStart,
2999 EndBit-EltOffset, Context))
3000 return false;
3001 }
3002 // If it overlaps no elements, then it is safe to process as padding.
3003 return true;
3004 }
3006 if (const RecordType *RT = Ty->getAs<RecordType>()) {
3007 const RecordDecl *RD = RT->getDecl();
3008 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
3010 // If this is a C++ record, check the bases first.
3011 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
3012 for (const auto &I : CXXRD->bases()) {
3013 assert(!I.isVirtual() && !I.getType()->isDependentType() &&
3014 "Unexpected base class!");
3015 const CXXRecordDecl *Base =
3016 cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
3018 // If the base is after the span we care about, ignore it.
3019 unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
3020 if (BaseOffset >= EndBit) continue;
3022 unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
3023 if (!BitsContainNoUserData(I.getType(), BaseStart,
3024 EndBit-BaseOffset, Context))
3025 return false;
3026 }
3027 }
3029 // Verify that no field has data that overlaps the region of interest. Yes
3030 // this could be sped up a lot by being smarter about queried fields,
3031 // however we're only looking at structs up to 16 bytes, so we don't care
3032 // much.
3033 unsigned idx = 0;
3034 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
3035 i != e; ++i, ++idx) {
3036 unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);
3038 // If we found a field after the region we care about, then we're done.
3039 if (FieldOffset >= EndBit) break;
3041 unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;
3042 if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset,
3043 Context))
3044 return false;
3045 }
3047 // If nothing in this record overlapped the area of interest, then we're
3048 // clean.
3049 return true;
3050 }
3052 return false;
3053 }
3055 /// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a
3056 /// float member at the specified offset. For example, {int,{float}} has a
3057 /// float at offset 4. It is conservatively correct for this routine to return
3058 /// false.
3059 static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset,
3060 const llvm::DataLayout &TD) {
3061 // Base case if we find a float.
3062 if (IROffset == 0 && IRType->isFloatTy())
3063 return true;
3065 // If this is a struct, recurse into the field at the specified offset.
3066 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
3067 const llvm::StructLayout *SL = TD.getStructLayout(STy);
3068 unsigned Elt = SL->getElementContainingOffset(IROffset);
3069 IROffset -= SL->getElementOffset(Elt);
3070 return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD);
3071 }
3073 // If this is an array, recurse into the field at the specified offset.
3074 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
3075 llvm::Type *EltTy = ATy->getElementType();
3076 unsigned EltSize = TD.getTypeAllocSize(EltTy);
3077 IROffset -= IROffset/EltSize*EltSize;
3078 return ContainsFloatAtOffset(EltTy, IROffset, TD);
3079 }
3081 return false;
3082 }
3085 /// GetSSETypeAtOffset - Return a type that will be passed by the backend in the
3086 /// low 8 bytes of an XMM register, corresponding to the SSE class.
3087 llvm::Type *X86_64ABIInfo::
3088 GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
3089 QualType SourceTy, unsigned SourceOffset) const {
3090 // The only three choices we have are either double, <2 x float>, or float. We
3091 // pass as float if the last 4 bytes are just padding. This happens for
3092 // structs that contain 3 floats.
3093 if (BitsContainNoUserData(SourceTy, SourceOffset*8+32,
3094 SourceOffset*8+64, getContext()))
3095 return llvm::Type::getFloatTy(getVMContext());
3097 // We want to pass as <2 x float> if the LLVM IR type contains a float at
3098 // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the
3099 // case.
3100 if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) &&
3101 ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout()))
3102 return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2);
3104 return llvm::Type::getDoubleTy(getVMContext());
3105 }
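// Worked example (illustrative, not part of the original source): for
//   struct V3 { float x, y, z; };   // 12 bytes
// the low eightbyte (x, y) is returned as <2 x float>; in the high
// eightbyte only z is user data, so the trailing 4 bytes are padding and
// plain float is chosen.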
3108 /// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in
3109 /// an 8-byte GPR. This means that we either have a scalar or we are talking
3110 /// about the high or low part of an up-to-16-byte struct. This routine picks
3111 /// the best LLVM IR type to represent this, which may be i64 or may be anything
3112 /// else that the backend will pass in a GPR that works better (e.g. i8, %foo*,
3113 /// etc).
3115 /// PrefType is an LLVM IR type that corresponds to (part of) the IR type for
3116 /// the source type. IROffset is an offset in bytes into the LLVM IR type that
3117 /// the 8-byte value references. PrefType may be null.
3119 /// SourceTy is the source-level type for the entire argument. SourceOffset is
3120 /// an offset into this that we're processing (which is always either 0 or 8).
3122 llvm::Type *X86_64ABIInfo::
3123 GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
3124 QualType SourceTy, unsigned SourceOffset) const {
3125 // If we're dealing with an un-offset LLVM IR type, then it means that we're
3126 // returning an 8-byte unit starting with it. See if we can safely use it.
3127 if (IROffset == 0) {
3128 // Pointers and int64's always fill the 8-byte unit.
3129 if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
3130 IRType->isIntegerTy(64))
3131 return IRType;
3133 // If we have a 1/2/4-byte integer, we can use it only if the rest of the
3134 // goodness in the source type is just tail padding. This is allowed to
3135 // kick in for struct {double,int} on the int, but not on
3136 // struct{double,int,int} because we wouldn't return the second int. We
3137 // have to do this analysis on the source type because we can't depend on
3138 // unions being lowered a specific way etc.
3139 if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
3140 IRType->isIntegerTy(32) ||
3141 (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
3142 unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
3143 cast<llvm::IntegerType>(IRType)->getBitWidth();
3145 if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
3146 SourceOffset*8+64, getContext()))
3147 return IRType;
3148 }
3149 }
3151 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
3152 // If this is a struct, recurse into the field at the specified offset.
3153 const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
3154 if (IROffset < SL->getSizeInBytes()) {
3155 unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
3156 IROffset -= SL->getElementOffset(FieldIdx);
3158 return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
3159 SourceTy, SourceOffset);
3160 }
3161 }
3163 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
3164 llvm::Type *EltTy = ATy->getElementType();
3165 unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
3166 unsigned EltOffset = IROffset/EltSize*EltSize;
3167 return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
3168 SourceOffset);
3169 }
3171 // Okay, we don't have any better idea of what to pass, so we pass this in an
3172 // integer register that isn't too big to fit the rest of the struct.
3173 unsigned TySizeInBytes =
3174 (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
3176 assert(TySizeInBytes != SourceOffset && "Empty field?");
3178 // It is always safe to classify this as an integer type up to i64 that
3179 // isn't larger than the structure.
3180 return llvm::IntegerType::get(getVMContext(),
3181 std::min(TySizeInBytes-SourceOffset, 8U)*8);
3182 }
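// Worked example (illustrative, not part of the original source): for
//   struct S { double d; int i; };
// the high eightbyte holds i followed by tail padding, so this routine
// returns i32 and S is lowered as {double, i32} rather than {double, i64}.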
3185 /// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
3186 /// be used as elements of a two register pair to pass or return, return a
3187 /// first class aggregate to represent them. For example, if the low part of
3188 /// a by-value argument should be passed as i32* and the high part as float,
3189 /// return {i32*, float}.
3190 static llvm::Type *
3191 GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
3192 const llvm::DataLayout &TD) {
3193 // In order to correctly satisfy the ABI, we need the high part to start
3194 // at offset 8. If the high and low parts we inferred are both 4-byte types
3195 // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
3196 // the second element at offset 8. Check for this:
3197 unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
3198 unsigned HiAlign = TD.getABITypeAlignment(Hi);
3199 unsigned HiStart = llvm::alignTo(LoSize, HiAlign);
3200 assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
3202 // To handle this, we have to increase the size of the low part so that the
3203 // second element will start at an 8 byte offset. We can't increase the size
3204 // of the second element because it might make us access off the end of the
3205 // struct.
3207 // There are usually two sorts of types the ABI generation code can produce
3208 // for the low part of a pair that aren't 8 bytes in size: float or
3209 // i8/i16/i32. This can also include pointers when they are 32-bit (X32 and
3210 // NaCl).
3211 // Promote these to a larger type.
3212 if (Lo->isFloatTy())
3213 Lo = llvm::Type::getDoubleTy(Lo->getContext());
3214 else {
3215 assert((Lo->isIntegerTy() || Lo->isPointerTy())
3216 && "Invalid/unknown lo type");
3217 Lo = llvm::Type::getInt64Ty(Lo->getContext());
3218 }
3219 }
3221 llvm::StructType *Result = llvm::StructType::get(Lo, Hi);
3223 // Verify that the second element is at an 8-byte offset.
3224 assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
3225 "Invalid x86-64 argument pair!");
3226 return Result;
3227 }
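// Worked example (illustrative, not part of the original source): with
// Lo = float and Hi = i32, the naive {float, i32} would place the high
// part at offset 4, so Lo is promoted to double and {double, i32} is
// returned, putting Hi at the required offset 8.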
3229 ABIArgInfo X86_64ABIInfo::
3230 classifyReturnType(QualType RetTy) const {
3231 // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
3232 // classification algorithm.
3233 X86_64ABIInfo::Class Lo, Hi;
3234 classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true);
3236 // Check some invariants.
3237 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
3238 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
3240 llvm::Type *ResType = nullptr;
3241 switch (Lo) {
3242 case NoClass:
3243 if (Hi == NoClass)
3244 return ABIArgInfo::getIgnore();
3245 // If the low part is just padding, it takes no register, leave ResType
3246 // null.
3247 assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
3248 "Unknown missing lo part");
3249 break;
3251 case SSEUp:
3252 case X87Up:
3253 llvm_unreachable("Invalid classification for lo word.");
3255 // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
3256 // hidden argument.
3257 case Memory:
3258 return getIndirectReturnResult(RetTy);
3260 // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
3261 // available register of the sequence %rax, %rdx is used.
3262 case Integer:
3263 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
3265 // If we have a sign or zero extended integer, make sure to return Extend
3266 // so that the parameter gets the right LLVM IR attributes.
3267 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
3268 // Treat an enum type as its underlying type.
3269 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
3270 RetTy = EnumTy->getDecl()->getIntegerType();
3272 if (RetTy->isIntegralOrEnumerationType() &&
3273 RetTy->isPromotableIntegerType())
3274 return ABIArgInfo::getExtend();
3275 }
3276 break;
3278 // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
3279 // available SSE register of the sequence %xmm0, %xmm1 is used.
3280 case SSE:
3281 ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
3282 break;
3284 // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
3285 // returned on the X87 stack in %st0 as 80-bit x87 number.
3286 case X87:
3287 ResType = llvm::Type::getX86_FP80Ty(getVMContext());
3288 break;
3290 // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
3291 // part of the value is returned in %st0 and the imaginary part in
3292 // %st1.
3293 case ComplexX87:
3294 assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
3295 ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
3296 llvm::Type::getX86_FP80Ty(getVMContext()));
3297 break;
3298 }
3300 llvm::Type *HighPart = nullptr;
3301 switch (Hi) {
3302 // Memory was handled previously and X87 should
3303 // never occur as a hi class.
3304 case Memory:
3305 case X87:
3306 llvm_unreachable("Invalid classification for hi word.");
3308 case ComplexX87: // Previously handled.
3309 case NoClass:
3310 break;
3312 case Integer:
3313 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
3314 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
3315 return ABIArgInfo::getDirect(HighPart, 8);
3316 break;
3317 case SSE:
3318 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
3319 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
3320 return ABIArgInfo::getDirect(HighPart, 8);
3321 break;
3323 // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
3324 // is passed in the next available eightbyte chunk of the last used
3325 // vector register.
3327 // SSEUP should always be preceded by SSE, just widen.
3328 case SSEUp:
3329 assert(Lo == SSE && "Unexpected SSEUp classification.");
3330 ResType = GetByteVectorType(RetTy);
3331 break;
3333 // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
3334 // returned together with the previous X87 value in %st0.
3335 case X87Up:
3336 // If X87Up is preceded by X87, we don't need to do
3337 // anything. However, in some cases with unions it may not be
3338 // preceded by X87. In such situations we follow gcc and pass the
3339 // extra bits in an SSE reg.
3340 if (Lo != X87) {
3341 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
3342 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
3343 return ABIArgInfo::getDirect(HighPart, 8);
3344 }
3345 break;
3346 }
3348 // If a high part was specified, merge it together with the low part. It is
3349 // known to pass in the high eightbyte of the result. We do this by forming a
3350 // first class struct aggregate with the high and low part: {low, high}
3351 if (HighPart)
3352 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
3354 return ABIArgInfo::getDirect(ResType);
3355 }
3357 ABIArgInfo X86_64ABIInfo::classifyArgumentType(
3358 QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE,
3359 bool isNamedArg) const {
3362 Ty = useFirstFieldIfTransparentUnion(Ty);
3364 X86_64ABIInfo::Class Lo, Hi;
3365 classify(Ty, 0, Lo, Hi, isNamedArg);
3367 // Check some invariants.
3368 // FIXME: Enforce these by construction.
3369 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
3370 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
3372 neededInt = 0;
3373 neededSSE = 0;
3374 llvm::Type *ResType = nullptr;
3375 switch (Lo) {
3376 case NoClass:
3377 if (Hi == NoClass)
3378 return ABIArgInfo::getIgnore();
3379 // If the low part is just padding, it takes no register, leave ResType
3380 // null.
3381 assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
3382 "Unknown missing lo part");
3383 break;
3385 // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
3386 // on the stack.
3387 case Memory:
3389 // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
3390 // COMPLEX_X87, it is passed in memory.
3391 case X87:
3392 case ComplexX87:
3393 if (getRecordArgABI(Ty, getCXXABI()) == CGCXXABI::RAA_Indirect)
3394 ++neededInt;
3395 return getIndirectResult(Ty, freeIntRegs);
3397 case SSEUp:
3398 case X87Up:
3399 llvm_unreachable("Invalid classification for lo word.");
3401 // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
3402 // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
3403 // and %r9 is used.
3404 case Integer:
3405 ++neededInt;
3407 // Pick an 8-byte type based on the preferred type.
3408 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);
3410 // If we have a sign or zero extended integer, make sure to return Extend
3411 // so that the parameter gets the right LLVM IR attributes.
3412 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
3413 // Treat an enum type as its underlying type.
3414 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3415 Ty = EnumTy->getDecl()->getIntegerType();
3417 if (Ty->isIntegralOrEnumerationType() &&
3418 Ty->isPromotableIntegerType())
3419 return ABIArgInfo::getExtend();
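// For instance, a 'short' parameter is a promotable integer type, so it
// takes the Extend path above and the IR parameter carries a signext
// attribute (zeroext for 'unsigned short').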
3424 // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
3425 // available SSE register is used, the registers are taken in the
3426 // order from %xmm0 to %xmm7.
3428 llvm::Type *IRType = CGT.ConvertType(Ty);
3429 ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
3435 llvm::Type *HighPart = nullptr;
3437 // Memory was handled previously, ComplexX87 and X87 should
3438 // never occur as hi classes, and X87Up must be preceded by X87,
3439 // which is passed in memory.
3443 llvm_unreachable("Invalid classification for hi word.");
3445 case NoClass: break;
3449 // Pick an 8-byte type based on the preferred type.
3450 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
3452 if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
3453 return ABIArgInfo::getDirect(HighPart, 8);
3456 // X87Up generally doesn't occur here (long double is passed in
3457 // memory), except in situations involving unions.
3460 HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
3462 if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
3463 return ABIArgInfo::getDirect(HighPart, 8);
3468 // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
3469 // eightbyte is passed in the upper half of the last used SSE
3470 // register. This only happens when 128-bit vectors are passed.
3472 assert(Lo == SSE && "Unexpected SSEUp classification");
3473 ResType = GetByteVectorType(Ty);
3477 // If a high part was specified, merge it together with the low part. It is
3478 // known to pass in the high eightbyte of the result. We do this by forming a
3479 // first class struct aggregate with the high and low part: {low, high}
3481 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
3483 return ABIArgInfo::getDirect(ResType);
3487 X86_64ABIInfo::classifyRegCallStructTypeImpl(QualType Ty, unsigned &NeededInt,
3488 unsigned &NeededSSE) const {
3489 auto RT = Ty->getAs<RecordType>();
3490 assert(RT && "classifyRegCallStructType only valid with struct types");
3492 if (RT->getDecl()->hasFlexibleArrayMember())
3493 return getIndirectReturnResult(Ty);
3496 if (auto CXXRD = dyn_cast<CXXRecordDecl>(RT->getDecl())) {
3497 if (CXXRD->isDynamicClass()) {
3498 NeededInt = NeededSSE = 0;
3499 return getIndirectReturnResult(Ty);
3502 for (const auto &I : CXXRD->bases())
3503 if (classifyRegCallStructTypeImpl(I.getType(), NeededInt, NeededSSE)
3505 NeededInt = NeededSSE = 0;
3506 return getIndirectReturnResult(Ty);
3511 for (const auto *FD : RT->getDecl()->fields()) {
3512 if (FD->getType()->isRecordType() && !FD->getType()->isUnionType()) {
3513 if (classifyRegCallStructTypeImpl(FD->getType(), NeededInt, NeededSSE)
3515 NeededInt = NeededSSE = 0;
3516 return getIndirectReturnResult(Ty);
3519 unsigned LocalNeededInt, LocalNeededSSE;
3520 if (classifyArgumentType(FD->getType(), UINT_MAX, LocalNeededInt,
3521 LocalNeededSSE, true)
3523 NeededInt = NeededSSE = 0;
3524 return getIndirectReturnResult(Ty);
3526 NeededInt += LocalNeededInt;
3527 NeededSSE += LocalNeededSSE;
3531 return ABIArgInfo::getDirect();
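// Rough example (type name is hypothetical): under regcall, a type such as
//   struct P { double x, y; };
// accumulates NeededInt = 0 and NeededSSE = 2 over its two fields and is
// passed directly as long as two SSE registers remain free.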
3534 ABIArgInfo X86_64ABIInfo::classifyRegCallStructType(QualType Ty,
3535 unsigned &NeededInt,
3536 unsigned &NeededSSE) const {
3538 NeededInt = 0;
3539 NeededSSE = 0;
3541 return classifyRegCallStructTypeImpl(Ty, NeededInt, NeededSSE);
3544 void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
3546 const unsigned CallingConv = FI.getCallingConvention();
3547 // It is possible to force the Win64 calling convention on any x86_64 target
3548 // by using __attribute__((ms_abi)). In that case, delegate this call to
3549 // WinX86_64ABIInfo::computeInfo to emit Win64-compatible code.
3550 if (CallingConv == llvm::CallingConv::Win64) {
3551 WinX86_64ABIInfo Win64ABIInfo(CGT);
3552 Win64ABIInfo.computeInfo(FI);
3556 bool IsRegCall = CallingConv == llvm::CallingConv::X86_RegCall;
3558 // Keep track of the number of assigned registers.
3559 unsigned FreeIntRegs = IsRegCall ? 11 : 6;
3560 unsigned FreeSSERegs = IsRegCall ? 16 : 8;
3561 unsigned NeededInt, NeededSSE;
3563 if (!getCXXABI().classifyReturnType(FI)) {
3564 if (IsRegCall && FI.getReturnType()->getTypePtr()->isRecordType() &&
3565 !FI.getReturnType()->getTypePtr()->isUnionType()) {
3566 FI.getReturnInfo() =
3567 classifyRegCallStructType(FI.getReturnType(), NeededInt, NeededSSE);
3568 if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
3569 FreeIntRegs -= NeededInt;
3570 FreeSSERegs -= NeededSSE;
3572 FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
3574 } else if (IsRegCall && FI.getReturnType()->getAs<ComplexType>()) {
3575 // A complex long double is returned in memory when the regcall
3576 // calling convention is used.
3577 const ComplexType *CT = FI.getReturnType()->getAs<ComplexType>();
3578 if (getContext().getCanonicalType(CT->getElementType()) ==
3579 getContext().LongDoubleTy)
3580 FI.getReturnInfo() = getIndirectReturnResult(FI.getReturnType());
3582 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
3585 // If the return value is indirect, then the hidden argument consumes one
3586 // integer register.
3587 if (FI.getReturnInfo().isIndirect())
3588 --FreeIntRegs;
3590 // The chain argument effectively gives us another free register.
3591 if (FI.isChainCall())
3592 ++FreeIntRegs;
3594 unsigned NumRequiredArgs = FI.getNumRequiredArgs();
3595 // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
3596 // get assigned (in left-to-right order) for passing as follows...
3597 unsigned ArgNo = 0;
3598 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
3599 it != ie; ++it, ++ArgNo) {
3600 bool IsNamedArg = ArgNo < NumRequiredArgs;
3602 if (IsRegCall && it->type->isStructureOrClassType())
3603 it->info = classifyRegCallStructType(it->type, NeededInt, NeededSSE);
3604 else
3605 it->info = classifyArgumentType(it->type, FreeIntRegs, NeededInt,
3606 NeededSSE, IsNamedArg);
3608 // AMD64-ABI 3.2.3p3: If there are no registers available for any
3609 // eightbyte of an argument, the whole argument is passed on the
3610 // stack. If registers have already been assigned for some
3611 // eightbytes of such an argument, the assignments get reverted.
3612 if (FreeIntRegs >= NeededInt && FreeSSERegs >= NeededSSE) {
3613 FreeIntRegs -= NeededInt;
3614 FreeSSERegs -= NeededSSE;
3615 } else {
3616 it->info = getIndirectResult(it->type, FreeIntRegs);
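// For example, the seventh integer-class argument of a SysV call sees
// FreeIntRegs == 0, fails the check above, and falls through to
// getIndirectResult; a whole aggregate then goes to the stack rather than
// being split between registers and memory, while a plain scalar is still
// passed directly and simply lands in a stack slot.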
3621 static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF,
3622 Address VAListAddr, QualType Ty) {
3623 Address overflow_arg_area_p = CGF.Builder.CreateStructGEP(
3624 VAListAddr, 2, CharUnits::fromQuantity(8), "overflow_arg_area_p");
3625 llvm::Value *overflow_arg_area =
3626 CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");
3628 // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
3629 // byte boundary if alignment needed by type exceeds 8 byte boundary.
3630 // It isn't stated explicitly in the standard, but in practice we use
3631 // alignment greater than 16 where necessary.
3632 CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
3633 if (Align > CharUnits::fromQuantity(8)) {
3634 overflow_arg_area = emitRoundPointerUpToAlignment(CGF, overflow_arg_area,
3635 Align);
3636 }
3638 // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
3639 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
3640 llvm::Value *Res =
3641 CGF.Builder.CreateBitCast(overflow_arg_area,
3642 llvm::PointerType::getUnqual(LTy));
3644 // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
3645 // l->overflow_arg_area + sizeof(type).
3646 // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
3647 // an 8 byte boundary.
3649 uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
3650 llvm::Value *Offset =
3651 llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
3652 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
3653 "overflow_arg_area.next");
3654 CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
3656 // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
3657 return Address(Res, Align);
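// Worked example: for a 12-byte struct, SizeInBytes is 12 and the advance
// above is (12 + 7) & ~7 == 16, i.e. overflow_arg_area moves by two full
// 8-byte stack slots.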
3660 Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
3661 QualType Ty) const {
3662 // Assume that va_list type is correct; should be pointer to LLVM type:
3663 // struct {
3664 //   i32 gp_offset;
3665 //   i32 fp_offset;
3666 //   i8* overflow_arg_area;
3667 //   i8* reg_save_area;
3668 // };
3669 unsigned neededInt, neededSSE;
3671 Ty = getContext().getCanonicalType(Ty);
3672 ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE,
3673 /*isNamedArg*/false);
3675 // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
3676 // in the registers. If not go to step 7.
3677 if (!neededInt && !neededSSE)
3678 return EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);
3680 // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
3681 // general purpose registers needed to pass type and num_fp to hold
3682 // the number of floating point registers needed.
3684 // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
3685 // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
3686 // l->fp_offset > 304 - num_fp * 16 go to step 7.
3688 // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of
3689 // register save space.
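// Concretely, gp_offset ranges over the first 6 * 8 = 48 bytes of the
// register save area and fp_offset over the following 8 * 16 = 128 bytes,
// i.e. offsets 48 through 176.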
3691 llvm::Value *InRegs = nullptr;
3692 Address gp_offset_p = Address::invalid(), fp_offset_p = Address::invalid();
3693 llvm::Value *gp_offset = nullptr, *fp_offset = nullptr;
3694 if (neededInt) {
3695 gp_offset_p =
3696 CGF.Builder.CreateStructGEP(VAListAddr, 0, CharUnits::Zero(),
3697 "gp_offset_p");
3698 gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
3699 InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
3700 InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
3701 }
3703 if (neededSSE) {
3704 fp_offset_p =
3705 CGF.Builder.CreateStructGEP(VAListAddr, 1, CharUnits::fromQuantity(4),
3706 "fp_offset_p");
3707 fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
3708 llvm::Value *FitsInFP =
3709 llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
3710 FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
3711 InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
3714 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
3715 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
3716 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
3717 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
3719 // Emit code to load the value if it was passed in registers.
3721 CGF.EmitBlock(InRegBlock);
3723 // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
3724 // an offset of l->gp_offset and/or l->fp_offset. This may require
3725 // copying to a temporary location in case the parameter is passed
3726 // in different register classes or requires an alignment greater
3727 // than 8 for general purpose registers and 16 for XMM registers.
3729 // FIXME: This really results in shameful code when we end up needing to
3730 // collect arguments from different places; often what should result in a
3731 // simple assembling of a structure from scattered addresses has many more
3732 // loads than necessary. Can we clean this up?
3733 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
3734 llvm::Value *RegSaveArea = CGF.Builder.CreateLoad(
3735 CGF.Builder.CreateStructGEP(VAListAddr, 3, CharUnits::fromQuantity(16)),
3738 Address RegAddr = Address::invalid();
3739 if (neededInt && neededSSE) {
3741 assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
3742 llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
3743 Address Tmp = CGF.CreateMemTemp(Ty);
3744 Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST);
3745 assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
3746 llvm::Type *TyLo = ST->getElementType(0);
3747 llvm::Type *TyHi = ST->getElementType(1);
3748 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
3749 "Unexpected ABI info for mixed regs");
3750 llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
3751 llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
3752 llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegSaveArea, gp_offset);
3753 llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegSaveArea, fp_offset);
3754 llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
3755 llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
3757 // Copy the first element.
3758 // FIXME: Our choice of alignment here and below is probably pessimistic.
3759 llvm::Value *V = CGF.Builder.CreateAlignedLoad(
3760 TyLo, CGF.Builder.CreateBitCast(RegLoAddr, PTyLo),
3761 CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(TyLo)));
3762 CGF.Builder.CreateStore(V,
3763 CGF.Builder.CreateStructGEP(Tmp, 0, CharUnits::Zero()));
3765 // Copy the second element.
3766 V = CGF.Builder.CreateAlignedLoad(
3767 TyHi, CGF.Builder.CreateBitCast(RegHiAddr, PTyHi),
3768 CharUnits::fromQuantity(getDataLayout().getABITypeAlignment(TyHi)));
3769 CharUnits Offset = CharUnits::fromQuantity(
3770 getDataLayout().getStructLayout(ST)->getElementOffset(1));
3771 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1, Offset));
3773 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
3774 } else if (neededInt) {
3775 RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, gp_offset),
3776 CharUnits::fromQuantity(8));
3777 RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);
3779 // Copy to a temporary if necessary to ensure the appropriate alignment.
3780 std::pair<CharUnits, CharUnits> SizeAlign =
3781 getContext().getTypeInfoInChars(Ty);
3782 uint64_t TySize = SizeAlign.first.getQuantity();
3783 CharUnits TyAlign = SizeAlign.second;
3785 // Copy into a temporary if the type is more aligned than the
3786 // register save area.
3787 if (TyAlign.getQuantity() > 8) {
3788 Address Tmp = CGF.CreateMemTemp(Ty);
3789 CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, false);
3793 } else if (neededSSE == 1) {
3794 RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset),
3795 CharUnits::fromQuantity(16));
3796 RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);
3798 assert(neededSSE == 2 && "Invalid number of needed registers!");
3799 // SSE registers are spaced 16 bytes apart in the register save
3800 // area; we need to collect the two eightbytes together.
3801 // The ABI isn't explicit about this, but it seems reasonable
3802 // to assume that the slots are 16-byte aligned, since the stack is
3803 // naturally 16-byte aligned and the prologue is expected to store
3804 // all the SSE registers to the RSA.
3805 Address RegAddrLo = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset),
3806 CharUnits::fromQuantity(16));
3807 Address RegAddrHi =
3808 CGF.Builder.CreateConstInBoundsByteGEP(RegAddrLo,
3809 CharUnits::fromQuantity(16));
3810 llvm::Type *DoubleTy = CGF.DoubleTy;
3811 llvm::StructType *ST = llvm::StructType::get(DoubleTy, DoubleTy);
3812 llvm::Value *V;
3813 Address Tmp = CGF.CreateMemTemp(Ty);
3814 Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST);
3815 V = CGF.Builder.CreateLoad(
3816 CGF.Builder.CreateElementBitCast(RegAddrLo, DoubleTy));
3817 CGF.Builder.CreateStore(V,
3818 CGF.Builder.CreateStructGEP(Tmp, 0, CharUnits::Zero()));
3819 V = CGF.Builder.CreateLoad(
3820 CGF.Builder.CreateElementBitCast(RegAddrHi, DoubleTy));
3821 CGF.Builder.CreateStore(V,
3822 CGF.Builder.CreateStructGEP(Tmp, 1, CharUnits::fromQuantity(8)));
3824 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
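// E.g. for va_arg of a type like struct { double x, y; }, the two doubles
// sit 16 bytes apart at fp_offset and fp_offset + 16 in the register save
// area, and the stores above pack them adjacently into Tmp.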
3827 // AMD64-ABI 3.5.7p5: Step 5. Set:
3828 // l->gp_offset = l->gp_offset + num_gp * 8
3829 // l->fp_offset = l->fp_offset + num_fp * 16.
3830 if (neededInt) {
3831 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
3832 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
3833 gp_offset_p);
3835 if (neededSSE) {
3836 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
3837 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
3838 fp_offset_p);
3840 CGF.EmitBranch(ContBlock);
3842 // Emit code to load the value if it was passed in memory.
3844 CGF.EmitBlock(InMemBlock);
3845 Address MemAddr = EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);
3847 // Return the appropriate result.
3849 CGF.EmitBlock(ContBlock);
3850 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, MemAddr, InMemBlock,
3851 "vaarg.addr");
3853 return ResAddr;
3855 Address X86_64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
3856 QualType Ty) const {
3857 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
3858 CGF.getContext().getTypeInfoInChars(Ty),
3859 CharUnits::fromQuantity(8),
3860 /*allowHigherAlign*/ false);
3863 ABIArgInfo
3864 WinX86_64ABIInfo::reclassifyHvaArgType(QualType Ty, unsigned &FreeSSERegs,
3865 const ABIArgInfo &current) const {
3866 // Assumes vectorCall calling convention.
3867 const Type *Base = nullptr;
3868 uint64_t NumElts = 0;
3870 if (!Ty->isBuiltinType() && !Ty->isVectorType() &&
3871 isHomogeneousAggregate(Ty, Base, NumElts) && FreeSSERegs >= NumElts) {
3872 FreeSSERegs -= NumElts;
3873 return getDirectX86Hva();
3874 }
3876 return current;
3878 ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
3879 bool IsReturnType, bool IsVectorCall,
3880 bool IsRegCall) const {
3882 if (Ty->isVoidType())
3883 return ABIArgInfo::getIgnore();
3885 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3886 Ty = EnumTy->getDecl()->getIntegerType();
3888 TypeInfo Info = getContext().getTypeInfo(Ty);
3889 uint64_t Width = Info.Width;
3890 CharUnits Align = getContext().toCharUnitsFromBits(Info.Align);
3892 const RecordType *RT = Ty->getAs<RecordType>();
3894 if (!IsReturnType) {
3895 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()))
3896 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
3899 if (RT->getDecl()->hasFlexibleArrayMember())
3900 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
3904 const Type *Base = nullptr;
3905 uint64_t NumElts = 0;
3906 // vectorcall adds the concept of a homogeneous vector aggregate, similar to
3907 // other targets.
3908 if ((IsVectorCall || IsRegCall) &&
3909 isHomogeneousAggregate(Ty, Base, NumElts)) {
3911 if (FreeSSERegs >= NumElts) {
3912 FreeSSERegs -= NumElts;
3913 if (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())
3914 return ABIArgInfo::getDirect();
3915 return ABIArgInfo::getExpand();
3917 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
3918 } else if (IsVectorCall) {
3919 if (FreeSSERegs >= NumElts &&
3920 (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())) {
3921 FreeSSERegs -= NumElts;
3922 return ABIArgInfo::getDirect();
3923 } else if (IsReturnType) {
3924 return ABIArgInfo::getExpand();
3925 } else if (!Ty->isBuiltinType() && !Ty->isVectorType()) {
3926 // HVAs are delayed and reclassified in the 2nd step.
3927 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
3932 if (Ty->isMemberPointerType()) {
3933 // If the member pointer is represented by an LLVM int or ptr, pass it
3934 // directly.
3935 llvm::Type *LLTy = CGT.ConvertType(Ty);
3936 if (LLTy->isPointerTy() || LLTy->isIntegerTy())
3937 return ABIArgInfo::getDirect();
3940 if (RT || Ty->isAnyComplexType() || Ty->isMemberPointerType()) {
3941 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
3942 // not 1, 2, 4, or 8 bytes, must be passed by reference."
3943 if (Width > 64 || !llvm::isPowerOf2_64(Width))
3944 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
3946 // Otherwise, coerce it to a small integer.
3947 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width));
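// For instance, a 12-byte struct (neither 1, 2, 4, nor 8 bytes) is passed
// by reference, while a 4-byte struct is coerced to i32 by the line above.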
3950 // Bool type is always extended to the ABI, other builtin types are not
3951 // extended.
3952 const BuiltinType *BT = Ty->getAs<BuiltinType>();
3953 if (BT && BT->getKind() == BuiltinType::Bool)
3954 return ABIArgInfo::getExtend();
3956 // Mingw64 GCC uses the old 80-bit extended precision floating point unit. It
3957 // passes long doubles indirectly through memory.
3958 if (IsMingw64 && BT && BT->getKind() == BuiltinType::LongDouble) {
3959 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
3960 if (LDF == &llvm::APFloat::x87DoubleExtended())
3961 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
3964 return ABIArgInfo::getDirect();
3967 void WinX86_64ABIInfo::computeVectorCallArgs(CGFunctionInfo &FI,
3968 unsigned FreeSSERegs,
3970 bool IsRegCall) const {
3972 for (auto &I : FI.arguments()) {
3973 // Vectorcall in x64 only permits the first 6 arguments to be passed
3974 // in XMM/YMM registers.
3975 if (Count < VectorcallMaxParamNumAsReg)
3976 I.info = classify(I.type, FreeSSERegs, false, IsVectorCall, IsRegCall);
3978 // Since these cannot be passed in registers, pretend no registers
3979 // are left.
3980 unsigned ZeroSSERegsAvail = 0;
3981 I.info = classify(I.type, /*FreeSSERegs=*/ZeroSSERegsAvail, false,
3982 IsVectorCall, IsRegCall);
3987 for (auto &I : FI.arguments()) {
3988 I.info = reclassifyHvaArgType(I.type, FreeSSERegs, I.info);
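// Sketch of the two-pass scheme (type name is hypothetical): a parameter
//   struct HVA4 { float a, b, c, d; };
// is left indirect by the first loop, and the second loop's
// reclassifyHvaArgType then claims four SSE registers for it if that many
// are still free.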
3992 void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
3993 bool IsVectorCall =
3994 FI.getCallingConvention() == llvm::CallingConv::X86_VectorCall;
3995 bool IsRegCall = FI.getCallingConvention() == llvm::CallingConv::X86_RegCall;
3997 unsigned FreeSSERegs = 0;
3998 if (IsVectorCall) {
3999 // We can use up to 4 SSE return registers with vectorcall.
4000 FreeSSERegs = 4;
4001 } else if (IsRegCall) {
4002 // RegCall gives us 16 SSE registers.
4003 FreeSSERegs = 16;
4006 if (!getCXXABI().classifyReturnType(FI))
4007 FI.getReturnInfo() = classify(FI.getReturnType(), FreeSSERegs, true,
4008 IsVectorCall, IsRegCall);
4010 if (IsVectorCall) {
4011 // We can use up to 6 SSE register parameters with vectorcall.
4012 FreeSSERegs = 6;
4013 } else if (IsRegCall) {
4014 // RegCall gives us 16 SSE registers; we can reuse the return registers.
4015 FreeSSERegs = 16;
4019 computeVectorCallArgs(FI, FreeSSERegs, IsVectorCall, IsRegCall);
4021 for (auto &I : FI.arguments())
4022 I.info = classify(I.type, FreeSSERegs, false, IsVectorCall, IsRegCall);
4027 Address WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4028 QualType Ty) const {
4030 bool IsIndirect = false;
4032 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
4033 // not 1, 2, 4, or 8 bytes, must be passed by reference."
4034 if (isAggregateTypeForABI(Ty) || Ty->isMemberPointerType()) {
4035 uint64_t Width = getContext().getTypeSize(Ty);
4036 IsIndirect = Width > 64 || !llvm::isPowerOf2_64(Width);
4039 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
4040 CGF.getContext().getTypeInfoInChars(Ty),
4041 CharUnits::fromQuantity(8),
4042 /*allowHigherAlign*/ false);
4047 /// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information.
4048 class PPC32_SVR4_ABIInfo : public DefaultABIInfo {
4049 bool IsSoftFloatABI;
4051 CharUnits getParamTypeAlignment(QualType Ty) const;
4054 PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, bool SoftFloatABI)
4055 : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI) {}
4057 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4058 QualType Ty) const override;
4061 class PPC32TargetCodeGenInfo : public TargetCodeGenInfo {
4063 PPC32TargetCodeGenInfo(CodeGenTypes &CGT, bool SoftFloatABI)
4064 : TargetCodeGenInfo(new PPC32_SVR4_ABIInfo(CGT, SoftFloatABI)) {}
4066 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
4067 // This is recovered from gcc output.
4068 return 1; // r1 is the dedicated stack pointer
4071 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4072 llvm::Value *Address) const override;
4076 CharUnits PPC32_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
4077 // Complex types are passed just like their elements
4078 if (const ComplexType *CTy = Ty->getAs<ComplexType>())
4079 Ty = CTy->getElementType();
4081 if (Ty->isVectorType())
4082 return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16
4085 // For single-element float/vector structs, we consider the whole type
4086 // to have the same alignment requirements as its single element.
4087 const Type *AlignTy = nullptr;
4088 if (const Type *EltType = isSingleElementStruct(Ty, getContext())) {
4089 const BuiltinType *BT = EltType->getAs<BuiltinType>();
4090 if ((EltType->isVectorType() && getContext().getTypeSize(EltType) == 128) ||
4091 (BT && BT->isFloatingPoint()))
4096 return CharUnits::fromQuantity(AlignTy->isVectorType() ? 16 : 4);
4097 return CharUnits::fromQuantity(4);
4100 // TODO: this implementation is now likely redundant with
4101 // DefaultABIInfo::EmitVAArg.
4102 Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
4103 QualType Ty) const {
4104 if (getTarget().getTriple().isOSDarwin()) {
4105 auto TI = getContext().getTypeInfoInChars(Ty);
4106 TI.second = getParamTypeAlignment(Ty);
4108 CharUnits SlotSize = CharUnits::fromQuantity(4);
4109 return emitVoidPtrVAArg(CGF, VAList, Ty,
4110 classifyArgumentType(Ty).isIndirect(), TI, SlotSize,
4111 /*AllowHigherAlign=*/true);
4114 const unsigned OverflowLimit = 8;
4115 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
4116 // TODO: Implement this. For now ignore.
4118 return Address::invalid(); // FIXME?
4121 // struct __va_list_tag {
4122 // unsigned char gpr;
4123 // unsigned char fpr;
4124 // unsigned short reserved;
4125 // void *overflow_arg_area;
4126 // void *reg_save_area;
4129 bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64;
4130 bool isInt =
4131 Ty->isIntegerType() || Ty->isPointerType() || Ty->isAggregateType();
4132 bool isF64 = Ty->isFloatingType() && getContext().getTypeSize(Ty) == 64;
4134 // All aggregates are passed indirectly? That doesn't seem consistent
4135 // with the argument-lowering code.
4136 bool isIndirect = Ty->isAggregateType();
4138 CGBuilderTy &Builder = CGF.Builder;
4140 // The calling convention either uses 1-2 GPRs or 1 FPR.
4141 Address NumRegsAddr = Address::invalid();
4142 if (isInt || IsSoftFloatABI) {
4143 NumRegsAddr = Builder.CreateStructGEP(VAList, 0, CharUnits::Zero(), "gpr");
4145 NumRegsAddr = Builder.CreateStructGEP(VAList, 1, CharUnits::One(), "fpr");
4148 llvm::Value *NumRegs = Builder.CreateLoad(NumRegsAddr, "numUsedRegs");
4150 // "Align" the register count when TY is i64.
4151 if (isI64 || (isF64 && IsSoftFloatABI)) {
4152 NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(1));
4153 NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t) ~1U));
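// Worked example: if three GPRs are already in use, the rounding above
// bumps the count to four, so a 64-bit value lands in the aligned pair
// r7/r8 instead of straddling r6/r7.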
4154 }
4156 llvm::Value *CC =
4157 Builder.CreateICmpULT(NumRegs, Builder.getInt8(OverflowLimit), "cond");
4159 llvm::BasicBlock *UsingRegs = CGF.createBasicBlock("using_regs");
4160 llvm::BasicBlock *UsingOverflow = CGF.createBasicBlock("using_overflow");
4161 llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
4163 Builder.CreateCondBr(CC, UsingRegs, UsingOverflow);
4165 llvm::Type *DirectTy = CGF.ConvertType(Ty);
4166 if (isIndirect) DirectTy = DirectTy->getPointerTo(0);
4168 // Case 1: consume registers.
4169 Address RegAddr = Address::invalid();
4171 CGF.EmitBlock(UsingRegs);
4173 Address RegSaveAreaPtr =
4174 Builder.CreateStructGEP(VAList, 4, CharUnits::fromQuantity(8));
4175 RegAddr = Address(Builder.CreateLoad(RegSaveAreaPtr),
4176 CharUnits::fromQuantity(8));
4177 assert(RegAddr.getElementType() == CGF.Int8Ty);
4179 // Floating-point registers start after the general-purpose registers.
4180 if (!(isInt || IsSoftFloatABI)) {
4181 RegAddr = Builder.CreateConstInBoundsByteGEP(RegAddr,
4182 CharUnits::fromQuantity(32));
4185 // Get the address of the saved value by scaling the number of
4186 // registers we've used by the size of each register.
4187 CharUnits RegSize = CharUnits::fromQuantity((isInt || IsSoftFloatABI) ? 4 : 8);
4188 llvm::Value *RegOffset =
4189 Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.getQuantity()));
4190 RegAddr = Address(Builder.CreateInBoundsGEP(CGF.Int8Ty,
4191 RegAddr.getPointer(), RegOffset),
4192 RegAddr.getAlignment().alignmentOfArrayElement(RegSize));
4193 RegAddr = Builder.CreateElementBitCast(RegAddr, DirectTy);
4195 // Increase the used-register count.
4196 NumRegs =
4197 Builder.CreateAdd(NumRegs,
4198 Builder.getInt8((isI64 || (isF64 && IsSoftFloatABI)) ? 2 : 1));
4199 Builder.CreateStore(NumRegs, NumRegsAddr);
4201 CGF.EmitBranch(Cont);
4204 // Case 2: consume space in the overflow area.
4205 Address MemAddr = Address::invalid();
4207 CGF.EmitBlock(UsingOverflow);
4209 Builder.CreateStore(Builder.getInt8(OverflowLimit), NumRegsAddr);
4211 // Everything in the overflow area is rounded up to a size of at least 4.
4212 CharUnits OverflowAreaAlign = CharUnits::fromQuantity(4);
4214 CharUnits Size;
4215 if (!isIndirect) {
4216 auto TypeInfo = CGF.getContext().getTypeInfoInChars(Ty);
4217 Size = TypeInfo.first.alignTo(OverflowAreaAlign);
4218 } else
4219 Size = CGF.getPointerSize();
4222 Address OverflowAreaAddr =
4223 Builder.CreateStructGEP(VAList, 3, CharUnits::fromQuantity(4));
4224 Address OverflowArea(Builder.CreateLoad(OverflowAreaAddr, "argp.cur"),
4225 OverflowAreaAlign);
4226 // Round up address of argument to alignment
4227 CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
4228 if (Align > OverflowAreaAlign) {
4229 llvm::Value *Ptr = OverflowArea.getPointer();
4230 OverflowArea = Address(emitRoundPointerUpToAlignment(CGF, Ptr, Align),
4231 Align);
4234 MemAddr = Builder.CreateElementBitCast(OverflowArea, DirectTy);
4236 // Increase the overflow area.
4237 OverflowArea = Builder.CreateConstInBoundsByteGEP(OverflowArea, Size);
4238 Builder.CreateStore(OverflowArea.getPointer(), OverflowAreaAddr);
4239 CGF.EmitBranch(Cont);
4242 CGF.EmitBlock(Cont);
4244 // Merge the cases with a phi.
4245 Address Result = emitMergePHI(CGF, RegAddr, UsingRegs, MemAddr, UsingOverflow,
4246 "vaarg.addr");
4248 // Load the pointer if the argument was passed indirectly.
4250 Result = Address(Builder.CreateLoad(Result, "aggr"),
4251 getContext().getTypeAlignInChars(Ty));
4258 PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4259 llvm::Value *Address) const {
4260 // This is calculated from the LLVM and GCC tables and verified
4261 // against gcc output. AFAIK all ABIs use the same encoding.
4263 CodeGen::CGBuilderTy &Builder = CGF.Builder;
4265 llvm::IntegerType *i8 = CGF.Int8Ty;
4266 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
4267 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
4268 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
4270 // 0-31: r0-31, the 4-byte general-purpose registers
4271 AssignToArrayRange(Builder, Address, Four8, 0, 31);
4273 // 32-63: fp0-31, the 8-byte floating-point registers
4274 AssignToArrayRange(Builder, Address, Eight8, 32, 63);
4276 // 64-76 are various 4-byte special-purpose registers:
4283 AssignToArrayRange(Builder, Address, Four8, 64, 76);
4285 // 77-108: v0-31, the 16-byte vector registers
4286 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
4293 AssignToArrayRange(Builder, Address, Four8, 109, 113);
4301 /// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
4302 class PPC64_SVR4_ABIInfo : public ABIInfo {
4310 static const unsigned GPRBits = 64;
4313 bool IsSoftFloatABI;
4315 // A vector of float or double will be promoted to <4 x f32> or <4 x f64> and
4316 // will be passed in a QPX register.
4317 bool IsQPXVectorTy(const Type *Ty) const {
4318 if (!HasQPX)
4319 return false;
4321 if (const VectorType *VT = Ty->getAs<VectorType>()) {
4322 unsigned NumElements = VT->getNumElements();
4323 if (NumElements == 1)
4326 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) {
4327 if (getContext().getTypeSize(Ty) <= 256)
4329 } else if (VT->getElementType()->
4330 isSpecificBuiltinType(BuiltinType::Float)) {
4331 if (getContext().getTypeSize(Ty) <= 128)
4339 bool IsQPXVectorTy(QualType Ty) const {
4340 return IsQPXVectorTy(Ty.getTypePtr());
4344 PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind, bool HasQPX,
4345 bool SoftFloatABI)
4346 : ABIInfo(CGT), Kind(Kind), HasQPX(HasQPX),
4347 IsSoftFloatABI(SoftFloatABI) {}
4349 bool isPromotableTypeForABI(QualType Ty) const;
4350 CharUnits getParamTypeAlignment(QualType Ty) const;
4352 ABIArgInfo classifyReturnType(QualType RetTy) const;
4353 ABIArgInfo classifyArgumentType(QualType Ty) const;
4355 bool isHomogeneousAggregateBaseType(QualType Ty) const override;
4356 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
4357 uint64_t Members) const override;
4359 // TODO: We can add more logic to computeInfo to improve performance.
4360 // Example: For aggregate arguments that fit in a register, we could
4361 // use getDirectInReg (as is done below for structs containing a single
4362 // floating-point value) to avoid pushing them to memory on function
4363 // entry. This would require changing the logic in PPCISelLowering
4364 // when lowering the parameters in the caller and args in the callee.
4365 void computeInfo(CGFunctionInfo &FI) const override {
4366 if (!getCXXABI().classifyReturnType(FI))
4367 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
4368 for (auto &I : FI.arguments()) {
4369 // We rely on the default argument classification for the most part.
4370 // One exception: An aggregate containing a single floating-point
4371 // or vector item must be passed in a register if one is available.
4372 const Type *T = isSingleElementStruct(I.type, getContext());
4373 if (T) {
4374 const BuiltinType *BT = T->getAs<BuiltinType>();
4375 if (IsQPXVectorTy(T) ||
4376 (T->isVectorType() && getContext().getTypeSize(T) == 128) ||
4377 (BT && BT->isFloatingPoint())) {
4378 QualType QT(T, 0);
4379 I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
4380 continue;
4383 I.info = classifyArgumentType(I.type);
4387 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4388 QualType Ty) const override;
4391 class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {
4394 PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT,
4395 PPC64_SVR4_ABIInfo::ABIKind Kind, bool HasQPX,
4396 bool SoftFloatABI)
4397 : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT, Kind, HasQPX,
4398 SoftFloatABI)) {}
4400 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
4401 // This is recovered from gcc output.
4402 return 1; // r1 is the dedicated stack pointer
4405 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4406 llvm::Value *Address) const override;
4409 class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
4411 PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
4413 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
4414 // This is recovered from gcc output.
4415 return 1; // r1 is the dedicated stack pointer
4418 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4419 llvm::Value *Address) const override;
4424 // Return true if the ABI requires Ty to be passed sign- or zero-
4425 // extended to 64 bits.
4427 PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
4428 // Treat an enum type as its underlying type.
4429 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
4430 Ty = EnumTy->getDecl()->getIntegerType();
4432 // Promotable integer types are required to be promoted by the ABI.
4433 if (Ty->isPromotableIntegerType())
4436 // In addition to the usual promotable integer types, we also need to
4437 // extend all 32-bit types, since the ABI requires promotion to 64 bits.
4438 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
4439 switch (BT->getKind()) {
4440 case BuiltinType::Int:
4441 case BuiltinType::UInt:
4450 /// getParamTypeAlignment - Determine whether a type requires 16-byte or
4451 /// higher alignment in the parameter area. Always returns at least 8.
4452 CharUnits PPC64_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
4453 // Complex types are passed just like their elements.
4454 if (const ComplexType *CTy = Ty->getAs<ComplexType>())
4455 Ty = CTy->getElementType();
4457 // Only vector types of size 16 bytes need alignment (larger types are
4458 // passed via reference, smaller types are not aligned).
4459 if (IsQPXVectorTy(Ty)) {
4460 if (getContext().getTypeSize(Ty) > 128)
4461 return CharUnits::fromQuantity(32);
4463 return CharUnits::fromQuantity(16);
4464 } else if (Ty->isVectorType()) {
4465 return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16 : 8);
4468 // For single-element float/vector structs, we consider the whole type
4469 // to have the same alignment requirements as its single element.
4470 const Type *AlignAsType = nullptr;
4471 const Type *EltType = isSingleElementStruct(Ty, getContext());
4473 const BuiltinType *BT = EltType->getAs<BuiltinType>();
4474 if (IsQPXVectorTy(EltType) || (EltType->isVectorType() &&
4475 getContext().getTypeSize(EltType) == 128) ||
4476 (BT && BT->isFloatingPoint()))
4477 AlignAsType = EltType;
4480 // Likewise for ELFv2 homogeneous aggregates.
4481 const Type *Base = nullptr;
4482 uint64_t Members = 0;
4483 if (!AlignAsType && Kind == ELFv2 &&
4484 isAggregateTypeForABI(Ty) && isHomogeneousAggregate(Ty, Base, Members))
4487 // With special case aggregates, only vector base types need alignment.
4488 if (AlignAsType && IsQPXVectorTy(AlignAsType)) {
4489 if (getContext().getTypeSize(AlignAsType) > 128)
4490 return CharUnits::fromQuantity(32);
4492 return CharUnits::fromQuantity(16);
4493 } else if (AlignAsType) {
4494 return CharUnits::fromQuantity(AlignAsType->isVectorType() ? 16 : 8);
4497 // Otherwise, we only need alignment for any aggregate type that
4498 // has an alignment requirement of >= 16 bytes.
4499 if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128) {
4500 if (HasQPX && getContext().getTypeAlign(Ty) >= 256)
4501 return CharUnits::fromQuantity(32);
4502 return CharUnits::fromQuantity(16);
4505 return CharUnits::fromQuantity(8);
4508 /// isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous
4509 /// aggregate. Base is set to the base element type, and Members is set
4510 /// to the number of base elements.
4511 bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base,
4512 uint64_t &Members) const {
4513 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
4514 uint64_t NElements = AT->getSize().getZExtValue();
4517 if (!isHomogeneousAggregate(AT->getElementType(), Base, Members))
4519 Members *= NElements;
4520 } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
4521 const RecordDecl *RD = RT->getDecl();
4522 if (RD->hasFlexibleArrayMember())
4527 // If this is a C++ record, check the bases first.
4528 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
4529 for (const auto &I : CXXRD->bases()) {
4530 // Ignore empty records.
4531 if (isEmptyRecord(getContext(), I.getType(), true))
4534 uint64_t FldMembers;
4535 if (!isHomogeneousAggregate(I.getType(), Base, FldMembers))
4538 Members += FldMembers;
4542 for (const auto *FD : RD->fields()) {
4543 // Ignore (non-zero arrays of) empty records.
4544 QualType FT = FD->getType();
4545 while (const ConstantArrayType *AT =
4546 getContext().getAsConstantArrayType(FT)) {
4547 if (AT->getSize().getZExtValue() == 0)
4549 FT = AT->getElementType();
4551 if (isEmptyRecord(getContext(), FT, true))
4554 // For compatibility with GCC, ignore empty bitfields in C++ mode.
4555 if (getContext().getLangOpts().CPlusPlus &&
4556 FD->isBitField() && FD->getBitWidthValue(getContext()) == 0)
4559 uint64_t FldMembers;
4560 if (!isHomogeneousAggregate(FD->getType(), Base, FldMembers))
4563 Members = (RD->isUnion() ?
4564 std::max(Members, FldMembers) : Members + FldMembers);
4570 // Ensure there is no padding.
4571 if (getContext().getTypeSize(Base) * Members !=
4572 getContext().getTypeSize(Ty))
4576 if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
4578 Ty = CT->getElementType();
4581 // Most ABIs only support float, double, and some vector type widths.
4582 if (!isHomogeneousAggregateBaseType(Ty))
4585 // The base type must be the same for all members. Types that
4586 // agree in both total size and mode (float vs. vector) are
4587 // treated as being equivalent here.
4588 const Type *TyPtr = Ty.getTypePtr();
4591 // If it's a non-power-of-2 vector, its size is already a power-of-2,
4592 // so make sure to widen it explicitly.
4593 if (const VectorType *VT = Base->getAs<VectorType>()) {
4594 QualType EltTy = VT->getElementType();
4595 unsigned NumElements =
4596 getContext().getTypeSize(VT) / getContext().getTypeSize(EltTy);
4598 .getVectorType(EltTy, NumElements, VT->getVectorKind())
4603 if (Base->isVectorType() != TyPtr->isVectorType() ||
4604 getContext().getTypeSize(Base) != getContext().getTypeSize(TyPtr))
4607 return Members > 0 && isHomogeneousAggregateSmallEnough(Base, Members);
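// For example, struct { float f[3]; } is homogeneous with Base = float and
// Members = 3, whereas struct { float f; int i; } fails the base-type
// check and struct { float f; double d; } fails the same-base requirement.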
4610 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
4611 // Homogeneous aggregates for ELFv2 must have base types of float,
4612 // double, long double, or 128-bit vectors.
4613 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
4614 if (BT->getKind() == BuiltinType::Float ||
4615 BT->getKind() == BuiltinType::Double ||
4616 BT->getKind() == BuiltinType::LongDouble) {
4622 if (const VectorType *VT = Ty->getAs<VectorType>()) {
4623 if (getContext().getTypeSize(VT) == 128 || IsQPXVectorTy(Ty))
4629 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough(
4630 const Type *Base, uint64_t Members) const {
4631 // Vector types require one register, floating point types require one
4632 // or two registers depending on their size.
4634 Base->isVectorType() ? 1 : (getContext().getTypeSize(Base) + 63) / 64;
4636 // Homogeneous Aggregates may occupy at most 8 registers.
4637 return Members * NumRegs <= 8;
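// E.g. a 128-bit 'long double' member needs (128 + 63) / 64 = 2 registers,
// so a homogeneous aggregate of long doubles may have at most 4 members.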
4641 PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
4642 Ty = useFirstFieldIfTransparentUnion(Ty);
4644 if (Ty->isAnyComplexType())
4645 return ABIArgInfo::getDirect();
4647 // Non-Altivec vector types are passed in GPRs (smaller than 16 bytes)
4648 // or via reference (larger than 16 bytes).
4649 if (Ty->isVectorType() && !IsQPXVectorTy(Ty)) {
4650 uint64_t Size = getContext().getTypeSize(Ty);
4652 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
4653 else if (Size < 128) {
4654 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
4655 return ABIArgInfo::getDirect(CoerceTy);
4659 if (isAggregateTypeForABI(Ty)) {
4660 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
4661 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
4663 uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity();
4664 uint64_t TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity();
4666 // ELFv2 homogeneous aggregates are passed as array types.
4667 const Type *Base = nullptr;
4668 uint64_t Members = 0;
4669 if (Kind == ELFv2 &&
4670 isHomogeneousAggregate(Ty, Base, Members)) {
4671 llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
4672 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
4673 return ABIArgInfo::getDirect(CoerceTy);
4676 // If an aggregate may end up fully in registers, we do not
4677 // use the ByVal method, but pass the aggregate as an array.
4678 // This is usually beneficial since we avoid forcing the
4679 // back-end to store the argument to memory.
4680 uint64_t Bits = getContext().getTypeSize(Ty);
4681 if (Bits > 0 && Bits <= 8 * GPRBits) {
4682 llvm::Type *CoerceTy;
4684 // Types up to 8 bytes are passed as integer type (which will be
4685 // properly aligned in the argument save area doubleword).
4686 if (Bits <= GPRBits)
4688 llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
4689 // Larger types are passed as arrays, with the base type selected
4690 // according to the required alignment in the save area.
4692 uint64_t RegBits = ABIAlign * 8;
4693 uint64_t NumRegs = llvm::alignTo(Bits, RegBits) / RegBits;
4694 llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits);
4695 CoerceTy = llvm::ArrayType::get(RegTy, NumRegs);
4698 return ABIArgInfo::getDirect(CoerceTy);
4701 // All other aggregates are passed ByVal.
4702 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
4704 /*Realign=*/TyAlign > ABIAlign);
4707 return (isPromotableTypeForABI(Ty) ?
4708 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
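// Sketch of the coercion above: a 6-byte struct has Bits = 48 <= GPRBits
// and is passed as the single integer i48, while a 20-byte struct with
// 8-byte alignment gets RegBits = 64 and NumRegs = 3, i.e. it is passed
// as [3 x i64].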
4712 PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
4713 if (RetTy->isVoidType())
4714 return ABIArgInfo::getIgnore();
4716 if (RetTy->isAnyComplexType())
4717 return ABIArgInfo::getDirect();
4719 // Non-Altivec vector types are returned in GPRs (smaller than 16 bytes)
4720 // or via reference (larger than 16 bytes).
4721 if (RetTy->isVectorType() && !IsQPXVectorTy(RetTy)) {
4722 uint64_t Size = getContext().getTypeSize(RetTy);
4724 return getNaturalAlignIndirect(RetTy);
4725 else if (Size < 128) {
4726 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
4727 return ABIArgInfo::getDirect(CoerceTy);
4731 if (isAggregateTypeForABI(RetTy)) {
4732 // ELFv2 homogeneous aggregates are returned as array types.
4733 const Type *Base = nullptr;
4734 uint64_t Members = 0;
4735 if (Kind == ELFv2 &&
4736 isHomogeneousAggregate(RetTy, Base, Members)) {
4737 llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
4738 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
4739 return ABIArgInfo::getDirect(CoerceTy);
4742 // ELFv2 small aggregates are returned in up to two registers.
4743 uint64_t Bits = getContext().getTypeSize(RetTy);
4744 if (Kind == ELFv2 && Bits <= 2 * GPRBits) {
4745 if (Bits == 0)
4746 return ABIArgInfo::getIgnore();
4748 llvm::Type *CoerceTy;
4749 if (Bits > GPRBits) {
4750 CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits);
4751 CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy);
4754 llvm::IntegerType::get(getVMContext(), llvm::alignTo(Bits, 8));
4755 return ABIArgInfo::getDirect(CoerceTy);
4758 // All other aggregates are returned indirectly.
4759 return getNaturalAlignIndirect(RetTy);
4762 return (isPromotableTypeForABI(RetTy) ?
4763 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
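// For example, under ELFv2 a 4-byte struct is returned as i32, while a
// 12-byte struct (64 < Bits <= 128) comes back in two GPRs as the pair
// { i64, i64 }.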
4766 // Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine.
4767 Address PPC64_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4768 QualType Ty) const {
4769 auto TypeInfo = getContext().getTypeInfoInChars(Ty);
4770 TypeInfo.second = getParamTypeAlignment(Ty);
4772 CharUnits SlotSize = CharUnits::fromQuantity(8);
4774 // If we have a complex type and the base type is smaller than 8 bytes,
4775 // the ABI calls for the real and imaginary parts to be right-adjusted
4776 // in separate doublewords. However, Clang expects us to produce a
4777 // pointer to a structure with the two parts packed tightly. So generate
4778 // loads of the real and imaginary parts relative to the va_list pointer,
4779 // and store them to a temporary structure.
4780 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
4781 CharUnits EltSize = TypeInfo.first / 2;
4782 if (EltSize < SlotSize) {
4783 Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, CGF.Int8Ty,
4784 SlotSize * 2, SlotSize,
4785 SlotSize, /*AllowHigher*/ true);
4787 Address RealAddr = Addr;
4788 Address ImagAddr = RealAddr;
4789 if (CGF.CGM.getDataLayout().isBigEndian()) {
4790 RealAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr,
4791 SlotSize - EltSize);
4792 ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(ImagAddr,
4793 2 * SlotSize - EltSize);
4795 ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, SlotSize);
4798 llvm::Type *EltTy = CGF.ConvertTypeForMem(CTy->getElementType());
4799 RealAddr = CGF.Builder.CreateElementBitCast(RealAddr, EltTy);
4800 ImagAddr = CGF.Builder.CreateElementBitCast(ImagAddr, EltTy);
4801 llvm::Value *Real = CGF.Builder.CreateLoad(RealAddr, ".vareal");
4802 llvm::Value *Imag = CGF.Builder.CreateLoad(ImagAddr, ".vaimag");
4804 Address Temp = CGF.CreateMemTemp(Ty, "vacplx");
4805 CGF.EmitStoreOfComplex({Real, Imag}, CGF.MakeAddrLValue(Temp, Ty),
4811 // Otherwise, just use the general rule.
4812 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
4813 TypeInfo, SlotSize, /*AllowHigher*/ true);
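// E.g. for a big-endian va_arg of _Complex float, EltSize is 4, so the
// real part is loaded from byte offset 4 and the imaginary part from byte
// offset 12 of the two consumed doublewords before being packed tightly
// into the temporary.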
4817 PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4818 llvm::Value *Address) {
4819 // This is calculated from the LLVM and GCC tables and verified
4820 // against gcc output. AFAIK all ABIs use the same encoding.
4822 CodeGen::CGBuilderTy &Builder = CGF.Builder;
4824 llvm::IntegerType *i8 = CGF.Int8Ty;
4825 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
4826 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
4827 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
4829 // 0-31: r0-31, the 8-byte general-purpose registers
4830 AssignToArrayRange(Builder, Address, Eight8, 0, 31);
4832 // 32-63: fp0-31, the 8-byte floating-point registers
4833 AssignToArrayRange(Builder, Address, Eight8, 32, 63);
4835 // 64-67 are various 8-byte special-purpose registers:
4840 AssignToArrayRange(Builder, Address, Eight8, 64, 67);
4842 // 68-76 are various 4-byte special-purpose registers:
4845 AssignToArrayRange(Builder, Address, Four8, 68, 76);
4847 // 77-108: v0-31, the 16-byte vector registers
4848 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
4858 AssignToArrayRange(Builder, Address, Eight8, 109, 116);
4864 PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
4865 CodeGen::CodeGenFunction &CGF,
4866 llvm::Value *Address) const {
4868 return PPC64_initDwarfEHRegSizeTable(CGF, Address);
4872 PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4873 llvm::Value *Address) const {
4875 return PPC64_initDwarfEHRegSizeTable(CGF, Address);
4878 //===----------------------------------------------------------------------===//
4879 // AArch64 ABI Implementation
4880 //===----------------------------------------------------------------------===//
4884 class AArch64ABIInfo : public SwiftABIInfo {
4885 public:
4886 enum ABIKind {
4887 AAPCS = 0,
4888 DarwinPCS,
4889 Win64
4890 };
4892 private:
4893 ABIKind Kind;
4895 public:
4896 AArch64ABIInfo(CodeGenTypes &CGT, ABIKind Kind)
4897 : SwiftABIInfo(CGT), Kind(Kind) {}
4900 ABIKind getABIKind() const { return Kind; }
4901 bool isDarwinPCS() const { return Kind == DarwinPCS; }
4903 ABIArgInfo classifyReturnType(QualType RetTy) const;
4904 ABIArgInfo classifyArgumentType(QualType RetTy) const;
4905 bool isHomogeneousAggregateBaseType(QualType Ty) const override;
4906 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
4907 uint64_t Members) const override;
4909 bool isIllegalVectorType(QualType Ty) const;
4911 void computeInfo(CGFunctionInfo &FI) const override {
4912 if (!getCXXABI().classifyReturnType(FI))
4913 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
4915 for (auto &it : FI.arguments())
4916 it.info = classifyArgumentType(it.type);
4919 Address EmitDarwinVAArg(Address VAListAddr, QualType Ty,
4920 CodeGenFunction &CGF) const;
4922 Address EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
4923 CodeGenFunction &CGF) const;
4925 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4926 QualType Ty) const override {
4927 return Kind == Win64 ? EmitMSVAArg(CGF, VAListAddr, Ty)
4928 : isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
4929 : EmitAAPCSVAArg(VAListAddr, Ty, CGF);
4932 Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
4933 QualType Ty) const override;
4935 bool shouldPassIndirectlyForSwift(CharUnits totalSize,
4936 ArrayRef<llvm::Type*> scalars,
4937 bool asReturnValue) const override {
4938 return occupiesMoreThan(CGT, scalars, /*total*/ 4);
4940 bool isSwiftErrorInRegister() const override {
4941 return true;
4942 }
4944 bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy,
4945 unsigned elts) const override;
4948 class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
4950 AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind Kind)
4951 : TargetCodeGenInfo(new AArch64ABIInfo(CGT, Kind)) {}
4953 StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
4954 return "mov\tfp, fp\t\t// marker for objc_retainAutoreleaseReturnValue";
4957 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
4958 return 31;
4959 }
4961 bool doesReturnSlotInterfereWithArgs() const override { return false; }
4964 class WindowsAArch64TargetCodeGenInfo : public AArch64TargetCodeGenInfo {
4966 WindowsAArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind K)
4967 : AArch64TargetCodeGenInfo(CGT, K) {}
4969 void getDependentLibraryOption(llvm::StringRef Lib,
4970 llvm::SmallString<24> &Opt) const override {
4971 Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
4974 void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
4975 llvm::SmallString<32> &Opt) const override {
4976 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
4981 ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const {
4982 Ty = useFirstFieldIfTransparentUnion(Ty);
4984 // Handle illegal vector types here.
4985 if (isIllegalVectorType(Ty)) {
4986 uint64_t Size = getContext().getTypeSize(Ty);
4987 // Android promotes <2 x i8> to i16, not i32
4988 if (isAndroid() && (Size <= 16)) {
4989 llvm::Type *ResType = llvm::Type::getInt16Ty(getVMContext());
4990 return ABIArgInfo::getDirect(ResType);
4991 }
4992 if (Size <= 32) {
4993 llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
4994 return ABIArgInfo::getDirect(ResType);
4995 }
4996 if (Size == 64) {
4997 llvm::Type *ResType =
4998 llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
4999 return ABIArgInfo::getDirect(ResType);
5000 }
5001 if (Size == 128) {
5002 llvm::Type *ResType =
5003 llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
5004 return ABIArgInfo::getDirect(ResType);
5006 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
5009 if (!isAggregateTypeForABI(Ty)) {
5010 // Treat an enum type as its underlying type.
5011 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
5012 Ty = EnumTy->getDecl()->getIntegerType();
5014 return (Ty->isPromotableIntegerType() && isDarwinPCS()
5015 ? ABIArgInfo::getExtend()
5016 : ABIArgInfo::getDirect());
5019 // Structures with either a non-trivial destructor or a non-trivial
5020 // copy constructor are always indirect.
5021 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
5022 return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
5023 CGCXXABI::RAA_DirectInMemory);
5026 // Empty records are always ignored on Darwin, but actually passed in C++ mode
5027 // elsewhere for GNU compatibility.
5028 uint64_t Size = getContext().getTypeSize(Ty);
5029 bool IsEmpty = isEmptyRecord(getContext(), Ty, true);
5030 if (IsEmpty || Size == 0) {
5031 if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
5032 return ABIArgInfo::getIgnore();
5034 // GNU C mode. The only argument that gets ignored is an empty one with size
5035 // 0.
5036 if (IsEmpty && Size == 0)
5037 return ABIArgInfo::getIgnore();
5038 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
5041 // Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
5042 const Type *Base = nullptr;
5043 uint64_t Members = 0;
5044 if (isHomogeneousAggregate(Ty, Base, Members)) {
5045 return ABIArgInfo::getDirect(
5046 llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members));
5049 // Aggregates <= 16 bytes are passed directly in registers or on the stack.
5051 // On RenderScript, coerce aggregates <= 16 bytes to an integer array of the
5052 // same size and alignment.
5053 if (getTarget().isRenderScriptTarget()) {
5054 return coerceToIntArray(Ty, getContext(), getVMContext());
5056 unsigned Alignment = getContext().getTypeAlign(Ty);
5057 Size = llvm::alignTo(Size, 64); // round up to multiple of 8 bytes
5059 // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
5060 // For aggregates with 16-byte alignment, we use i128.
5061 if (Alignment < 128 && Size == 128) {
5062 llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
5063 return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
5065 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
5068 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
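// Example of the small-aggregate path above: struct { long a, b; } has
// Size = 128 with 8-byte alignment and is coerced to [2 x i64]; had the
// type carried 16-byte alignment (e.g. one containing __int128), it would
// be passed as the single integer i128 instead.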
5071 ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy) const {
5072 if (RetTy->isVoidType())
5073 return ABIArgInfo::getIgnore();
5075 // Large vector types should be returned via memory.
5076 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
5077 return getNaturalAlignIndirect(RetTy);
5079 if (!isAggregateTypeForABI(RetTy)) {
5080 // Treat an enum type as its underlying type.
5081 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
5082 RetTy = EnumTy->getDecl()->getIntegerType();
5084 return (RetTy->isPromotableIntegerType() && isDarwinPCS()
5085 ? ABIArgInfo::getExtend()
5086 : ABIArgInfo::getDirect());
5089 uint64_t Size = getContext().getTypeSize(RetTy);
5090 if (isEmptyRecord(getContext(), RetTy, true) || Size == 0)
5091 return ABIArgInfo::getIgnore();
5093 const Type *Base = nullptr;
5094 uint64_t Members = 0;
5095 if (isHomogeneousAggregate(RetTy, Base, Members))
5096 // Homogeneous Floating-point Aggregates (HFAs) are returned directly.
5097 return ABIArgInfo::getDirect();
5099 // Aggregates <= 16 bytes are returned directly in registers or on the stack.
5101 // On RenderScript, coerce aggregates <= 16 bytes to an integer array of the
5102 // same size and alignment.
5103 if (getTarget().isRenderScriptTarget()) {
5104 return coerceToIntArray(RetTy, getContext(), getVMContext());
5106 unsigned Alignment = getContext().getTypeAlign(RetTy);
5107 Size = llvm::alignTo(Size, 64); // round up to multiple of 8 bytes
5109 // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
5110 // For aggregates with 16-byte alignment, we use i128.
5111 if (Alignment < 128 && Size == 128) {
5112 llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
5113 return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
5115 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
5118 return getNaturalAlignIndirect(RetTy);
5121 /// isIllegalVectorType - check whether the vector type is legal for AArch64.
5122 bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const {
5123 if (const VectorType *VT = Ty->getAs<VectorType>()) {
5124 // Check whether VT is legal.
5125 unsigned NumElements = VT->getNumElements();
5126 uint64_t Size = getContext().getTypeSize(VT);
5127 // NumElements should be power of 2.
    if (!llvm::isPowerOf2_32(NumElements))
      return true;
    return Size != 64 && (Size != 128 || NumElements == 1);
  }
  return false;
}
bool AArch64ABIInfo::isLegalVectorTypeForSwift(CharUnits totalSize,
                                               llvm::Type *eltTy,
                                               unsigned elts) const {
  if (!llvm::isPowerOf2_32(elts))
    return false;
  if (totalSize.getQuantity() != 8 &&
      (totalSize.getQuantity() != 16 || elts == 1))
    return false;
  return true;
}
5146 bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
5147 // Homogeneous aggregates for AAPCS64 must have base types of a floating
5148 // point type or a short-vector type. This is the same as the 32-bit ABI,
5149 // but with the difference that any floating-point type is allowed,
5150 // including __fp16.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->isFloatingPoint())
      return true;
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    unsigned VecSize = getContext().getTypeSize(VT);
    if (VecSize == 64 || VecSize == 128)
      return true;
  }
  return false;
}
5162 bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
5163 uint64_t Members) const {
  return Members <= 4;
}
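// Editor's note -- illustrative cases, not from the original source:
// together these two predicates implement the AAPCS64 HFA/HVA rules, e.g.
//   struct F4 { float f[4]; };        // four identical FP members: an HFA
//   struct M2 { float f; double d; }; // mixed base types: not an HFA
//   struct F5 { float f[5]; };        // five members: too many, not an HFA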
Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  ABIArgInfo AI = classifyArgumentType(Ty);
  bool IsIndirect = AI.isIndirect();

  llvm::Type *BaseTy = CGF.ConvertType(Ty);
  if (IsIndirect)
    BaseTy = llvm::PointerType::getUnqual(BaseTy);
  else if (AI.getCoerceToType())
    BaseTy = AI.getCoerceToType();

  unsigned NumRegs = 1;
  if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
    BaseTy = ArrTy->getElementType();
    NumRegs = ArrTy->getNumElements();
  }
  bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy();

  // The AArch64 va_list type and handling is specified in the Procedure Call
  // Standard, section B.4:
  //
  // struct {
  //   void *__stack;
  //   void *__gr_top;
  //   void *__vr_top;
  //   int __gr_offs;
  //   int __vr_offs;
  // };
5197 llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
5198 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
5199 llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
5200 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
5202 auto TyInfo = getContext().getTypeInfoInChars(Ty);
5203 CharUnits TyAlign = TyInfo.second;
  Address reg_offs_p = Address::invalid();
  llvm::Value *reg_offs = nullptr;
  int reg_top_index;
  CharUnits reg_top_offset;
  int RegSize = IsIndirect ? 8 : TyInfo.first.getQuantity();
  if (!IsFPR) {
    // 3 is the field number of __gr_offs
    reg_offs_p =
        CGF.Builder.CreateStructGEP(VAListAddr, 3, CharUnits::fromQuantity(24),
                                    "gr_offs_p");
    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
    reg_top_index = 1; // field number for __gr_top
    reg_top_offset = CharUnits::fromQuantity(8);
    RegSize = llvm::alignTo(RegSize, 8);
  } else {
    // 4 is the field number of __vr_offs.
    reg_offs_p =
        CGF.Builder.CreateStructGEP(VAListAddr, 4, CharUnits::fromQuantity(28),
                                    "vr_offs_p");
    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
    reg_top_index = 2; // field number for __vr_top
    reg_top_offset = CharUnits::fromQuantity(16);
    RegSize = 16 * NumRegs;
  }
5230 //=======================================
5231 // Find out where argument was passed
5232 //=======================================
5234 // If reg_offs >= 0 we're already using the stack for this type of
5235 // argument. We don't want to keep updating reg_offs (in case it overflows,
5236 // though anyone passing 2GB of arguments, each at most 16 bytes, deserves
5237 // whatever they get).
5238 llvm::Value *UsingStack = nullptr;
5239 UsingStack = CGF.Builder.CreateICmpSGE(
5240 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));
5242 CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);
5244 // Otherwise, at least some kind of argument could go in these registers, the
5245 // question is whether this particular type is too big.
5246 CGF.EmitBlock(MaybeRegBlock);
5248 // Integer arguments may need to correct register alignment (for example a
5249 // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we
5250 // align __gr_offs to calculate the potential address.
  if (!IsFPR && !IsIndirect && TyAlign.getQuantity() > 8) {
    int Align = TyAlign.getQuantity();

    reg_offs = CGF.Builder.CreateAdd(
        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
        "align_regoffs");
    reg_offs = CGF.Builder.CreateAnd(
        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align),
        "aligned_regoffs");
  }
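  // Editor's note -- a worked example, not from the original source: for a
  // 16-byte-aligned value with __gr_offs == -56, the add/and pair computes
  // (-56 + 15) & -16 == -48, skipping one 8-byte GPR slot so that the value
  // is read from an even-numbered register pair.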
5262 // Update the gr_offs/vr_offs pointer for next call to va_arg on this va_list.
5263 // The fact that this is done unconditionally reflects the fact that
5264 // allocating an argument to the stack also uses up all the remaining
5265 // registers of the appropriate kind.
5266 llvm::Value *NewOffset = nullptr;
5267 NewOffset = CGF.Builder.CreateAdd(
5268 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
5269 CGF.Builder.CreateStore(NewOffset, reg_offs_p);
5271 // Now we're in a position to decide whether this argument really was in
5272 // registers or not.
5273 llvm::Value *InRegs = nullptr;
5274 InRegs = CGF.Builder.CreateICmpSLE(
5275 NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");
5277 CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);
5279 //=======================================
5280 // Argument was in registers
5281 //=======================================
5283 // Now we emit the code for if the argument was originally passed in
5284 // registers. First start the appropriate block:
5285 CGF.EmitBlock(InRegBlock);
5287 llvm::Value *reg_top = nullptr;
5288 Address reg_top_p = CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index,
5289 reg_top_offset, "reg_top_p");
5290 reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
5291 Address BaseAddr(CGF.Builder.CreateInBoundsGEP(reg_top, reg_offs),
5292 CharUnits::fromQuantity(IsFPR ? 16 : 8));
5293 Address RegAddr = Address::invalid();
5294 llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty);
  if (IsIndirect) {
    // If it's been passed indirectly (actually a struct), whatever we find
    // from stored registers or on the stack will actually be a struct **.
    MemTy = llvm::PointerType::getUnqual(MemTy);
  }
5302 const Type *Base = nullptr;
5303 uint64_t NumMembers = 0;
5304 bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers);
  if (IsHFA && NumMembers > 1) {
    // Homogeneous aggregates passed in registers will have their elements
    // split and stored 16-bytes apart regardless of size (they're notionally
    // in qN, qN+1, ...). We reload and store into a temporary local variable
    // contiguously.
    assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
    auto BaseTyInfo = getContext().getTypeInfoInChars(QualType(Base, 0));
    llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
    llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
    Address Tmp = CGF.CreateTempAlloca(HFATy,
                                       std::max(TyAlign, BaseTyInfo.second));

    // On big-endian platforms, the value will be right-aligned in its slot.
    int Offset = 0;
    if (CGF.CGM.getDataLayout().isBigEndian() &&
        BaseTyInfo.first.getQuantity() < 16)
      Offset = 16 - BaseTyInfo.first.getQuantity();

    for (unsigned i = 0; i < NumMembers; ++i) {
      CharUnits BaseOffset = CharUnits::fromQuantity(16 * i + Offset);
      Address LoadAddr =
          CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, BaseOffset);
      LoadAddr = CGF.Builder.CreateElementBitCast(LoadAddr, BaseTy);

      Address StoreAddr =
          CGF.Builder.CreateConstArrayGEP(Tmp, i, BaseTyInfo.first);

      llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
      CGF.Builder.CreateStore(Elem, StoreAddr);
    }

    RegAddr = CGF.Builder.CreateElementBitCast(Tmp, MemTy);
  } else {
    // Otherwise the object is contiguous in memory.
5340 // It might be right-aligned in its slot.
5341 CharUnits SlotSize = BaseAddr.getAlignment();
5342 if (CGF.CGM.getDataLayout().isBigEndian() && !IsIndirect &&
5343 (IsHFA || !isAggregateTypeForABI(Ty)) &&
5344 TyInfo.first < SlotSize) {
      CharUnits Offset = SlotSize - TyInfo.first;
      BaseAddr = CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, Offset);
    }

    RegAddr = CGF.Builder.CreateElementBitCast(BaseAddr, MemTy);
  }
5352 CGF.EmitBranch(ContBlock);
5354 //=======================================
5355 // Argument was on the stack
5356 //=======================================
5357 CGF.EmitBlock(OnStackBlock);
5359 Address stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0,
5360 CharUnits::Zero(), "stack_p");
5361 llvm::Value *OnStackPtr = CGF.Builder.CreateLoad(stack_p, "stack");
5363 // Again, stack arguments may need realignment. In this case both integer and
5364 // floating-point ones might be affected.
  if (!IsIndirect && TyAlign.getQuantity() > 8) {
    int Align = TyAlign.getQuantity();

    OnStackPtr = CGF.Builder.CreatePtrToInt(OnStackPtr, CGF.Int64Ty);

    OnStackPtr = CGF.Builder.CreateAdd(
        OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
        "align_stack");
    OnStackPtr = CGF.Builder.CreateAnd(
        OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),
        "align_stack");

    OnStackPtr = CGF.Builder.CreateIntToPtr(OnStackPtr, CGF.Int8PtrTy);
  }
5379 Address OnStackAddr(OnStackPtr,
5380 std::max(CharUnits::fromQuantity(8), TyAlign));
5382 // All stack slots are multiples of 8 bytes.
5383 CharUnits StackSlotSize = CharUnits::fromQuantity(8);
  CharUnits StackSize;
  if (IsIndirect)
    StackSize = StackSlotSize;
  else
    StackSize = TyInfo.first.alignTo(StackSlotSize);
5390 llvm::Value *StackSizeC = CGF.Builder.getSize(StackSize);
5391 llvm::Value *NewStack =
5392 CGF.Builder.CreateInBoundsGEP(OnStackPtr, StackSizeC, "new_stack");
5394 // Write the new value of __stack for the next call to va_arg
5395 CGF.Builder.CreateStore(NewStack, stack_p);
5397 if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
5398 TyInfo.first < StackSlotSize) {
    CharUnits Offset = StackSlotSize - TyInfo.first;
    OnStackAddr = CGF.Builder.CreateConstInBoundsByteGEP(OnStackAddr, Offset);
  }

  OnStackAddr = CGF.Builder.CreateElementBitCast(OnStackAddr, MemTy);
5405 CGF.EmitBranch(ContBlock);
  //=======================================
  // Tidy up
  //=======================================
5410 CGF.EmitBlock(ContBlock);
  Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock,
                                 OnStackAddr, OnStackBlock, "vaargs.addr");

  if (IsIndirect)
    return Address(CGF.Builder.CreateLoad(ResAddr, "vaarg.addr"),
                   TyInfo.second);

  return ResAddr;
}
5422 Address AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty,
5423 CodeGenFunction &CGF) const {
5424 // The backend's lowering doesn't support va_arg for aggregates or
5425 // illegal vector types. Lower VAArg here for these cases and use
5426 // the LLVM va_arg instruction for everything else.
5427 if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty))
5428 return EmitVAArgInstr(CGF, VAListAddr, Ty, ABIArgInfo::getDirect());
5430 CharUnits SlotSize = CharUnits::fromQuantity(8);
5432 // Empty records are ignored for parameter passing purposes.
5433 if (isEmptyRecord(getContext(), Ty, true)) {
    Address Addr(CGF.Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize);
    Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
    return Addr;
  }
5439 // The size of the actual thing passed, which might end up just
5440 // being a pointer for indirect types.
5441 auto TyInfo = getContext().getTypeInfoInChars(Ty);
5443 // Arguments bigger than 16 bytes which aren't homogeneous
5444 // aggregates should be passed indirectly.
5445 bool IsIndirect = false;
5446 if (TyInfo.first.getQuantity() > 16) {
5447 const Type *Base = nullptr;
5448 uint64_t Members = 0;
    IsIndirect = !isHomogeneousAggregate(Ty, Base, Members);
  }

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
                          TyInfo, SlotSize, /*AllowHigherAlign*/ true);
}
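// Editor's note -- illustrative calls, not from the original source:
//   va_arg(ap, int)         // legal scalar: handled by EmitVAArgInstr
//   va_arg(ap, BigStruct)   // aggregate: handled by emitVoidPtrVAArg,
//                           // indirect when > 16 bytes and not an HFA
// where BigStruct is any hypothetical aggregate type.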
5456 Address AArch64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
5457 QualType Ty) const {
5458 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
5459 CGF.getContext().getTypeInfoInChars(Ty),
5460 CharUnits::fromQuantity(8),
                          /*allowHigherAlign*/ false);
}
5464 //===----------------------------------------------------------------------===//
5465 // ARM ABI Implementation
5466 //===----------------------------------------------------------------------===//
class ARMABIInfo : public SwiftABIInfo {
public:
  enum ABIKind {
    APCS = 0,
    AAPCS = 1,
    AAPCS_VFP = 2,
    AAPCS16_VFP = 3,
  };

private:
  ABIKind Kind;

public:
  ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind)
      : SwiftABIInfo(CGT), Kind(_Kind) {
    setCCs();
  }
5488 bool isEABI() const {
5489 switch (getTarget().getTriple().getEnvironment()) {
5490 case llvm::Triple::Android:
5491 case llvm::Triple::EABI:
5492 case llvm::Triple::EABIHF:
5493 case llvm::Triple::GNUEABI:
5494 case llvm::Triple::GNUEABIHF:
5495 case llvm::Triple::MuslEABI:
    case llvm::Triple::MuslEABIHF:
      return true;
    default:
      return false;
    }
  }
5503 bool isEABIHF() const {
5504 switch (getTarget().getTriple().getEnvironment()) {
5505 case llvm::Triple::EABIHF:
5506 case llvm::Triple::GNUEABIHF:
    case llvm::Triple::MuslEABIHF:
      return true;
    default:
      return false;
    }
  }
5514 ABIKind getABIKind() const { return Kind; }
private:
  ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic) const;
  ABIArgInfo classifyArgumentType(QualType Ty, bool isVariadic) const;
5519 bool isIllegalVectorType(QualType Ty) const;
5521 bool isHomogeneousAggregateBaseType(QualType Ty) const override;
5522 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
5523 uint64_t Members) const override;
5525 void computeInfo(CGFunctionInfo &FI) const override;
5527 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
5528 QualType Ty) const override;
5530 llvm::CallingConv::ID getLLVMDefaultCC() const;
  llvm::CallingConv::ID getABIDefaultCC() const;
  void setCCs();
5534 bool shouldPassIndirectlyForSwift(CharUnits totalSize,
5535 ArrayRef<llvm::Type*> scalars,
5536 bool asReturnValue) const override {
    return occupiesMoreThan(CGT, scalars, /*total*/ 4);
  }
  bool isSwiftErrorInRegister() const override {
    return true;
  }
  bool isLegalVectorTypeForSwift(CharUnits totalSize, llvm::Type *eltTy,
                                 unsigned elts) const override;
};
class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
      : TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {}
5551 const ARMABIInfo &getABIInfo() const {
    return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo());
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 13;
  }
5559 StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
5560 return "mov\tr7, r7\t\t// marker for objc_retainAutoreleaseReturnValue";
5563 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
5564 llvm::Value *Address) const override {
5565 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
5567 // 0-15 are the 16 integer registers.
    AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15);
    return false;
  }
5572 unsigned getSizeOfUnwindException() const override {
5573 if (getABIInfo().isEABI()) return 88;
    return TargetCodeGenInfo::getSizeOfUnwindException();
  }
5577 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5578 CodeGen::CodeGenModule &CGM,
5579 ForDefinition_t IsForDefinition) const override {
    if (!IsForDefinition)
      return;
    const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD)
      return;

    const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>();
    if (!Attr)
      return;

    const char *Kind;
    switch (Attr->getInterrupt()) {
5592 case ARMInterruptAttr::Generic: Kind = ""; break;
5593 case ARMInterruptAttr::IRQ: Kind = "IRQ"; break;
5594 case ARMInterruptAttr::FIQ: Kind = "FIQ"; break;
5595 case ARMInterruptAttr::SWI: Kind = "SWI"; break;
5596 case ARMInterruptAttr::ABORT: Kind = "ABORT"; break;
    case ARMInterruptAttr::UNDEF: Kind = "UNDEF"; break;
    }

    llvm::Function *Fn = cast<llvm::Function>(GV);

    Fn->addFnAttr("interrupt", Kind);

    ARMABIInfo::ABIKind ABI = cast<ARMABIInfo>(getABIInfo()).getABIKind();
    if (ABI == ARMABIInfo::APCS)
      return;
5608 // AAPCS guarantees that sp will be 8-byte aligned on any public interface,
5609 // however this is not necessarily true on taking any interrupt. Instruct
5610 // the backend to perform a realignment as part of the function prologue.
5611 llvm::AttrBuilder B;
5612 B.addStackAlignmentAttr(8);
    Fn->addAttributes(llvm::AttributeList::FunctionIndex, B);
  }
};
class WindowsARMTargetCodeGenInfo : public ARMTargetCodeGenInfo {
public:
  WindowsARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
      : ARMTargetCodeGenInfo(CGT, K) {}
5622 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5623 CodeGen::CodeGenModule &CGM,
5624 ForDefinition_t IsForDefinition) const override;
5626 void getDependentLibraryOption(llvm::StringRef Lib,
5627 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:" + qualifyWindowsLibrary(Lib);
  }
5631 void getDetectMismatchOption(llvm::StringRef Name, llvm::StringRef Value,
5632 llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};
5637 void WindowsARMTargetCodeGenInfo::setTargetAttributes(
5638 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM,
5639 ForDefinition_t IsForDefinition) const {
5640 ARMTargetCodeGenInfo::setTargetAttributes(D, GV, CGM, IsForDefinition);
  if (!IsForDefinition)
    return;
  addStackProbeSizeTargetAttribute(D, GV, CGM);
}
5647 void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
5648 if (!getCXXABI().classifyReturnType(FI))
5649 FI.getReturnInfo() =
5650 classifyReturnType(FI.getReturnType(), FI.isVariadic());
5652 for (auto &I : FI.arguments())
5653 I.info = classifyArgumentType(I.type, FI.isVariadic());
5655 // Always honor user-specified calling convention.
  if (FI.getCallingConvention() != llvm::CallingConv::C)
    return;

  llvm::CallingConv::ID cc = getRuntimeCC();
  if (cc != llvm::CallingConv::C)
    FI.setEffectiveCallingConvention(cc);
}
5664 /// Return the default calling convention that LLVM will use.
5665 llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const {
5666 // The default calling convention that LLVM will infer.
  if (isEABIHF() || getTarget().getTriple().isWatchABI())
    return llvm::CallingConv::ARM_AAPCS_VFP;
  else if (isEABI())
    return llvm::CallingConv::ARM_AAPCS;
  else
    return llvm::CallingConv::ARM_APCS;
}
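// Editor's note -- illustrative mappings, not from the original source:
// armv7-unknown-linux-gnueabihf infers ARM_AAPCS_VFP, armv7-unknown-linux-
// gnueabi infers ARM_AAPCS, and a non-EABI, non-watch triple falls through
// to ARM_APCS.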
5675 /// Return the calling convention that our ABI would like us to use
5676 /// as the C calling convention.
5677 llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const {
5678 switch (getABIKind()) {
5679 case APCS: return llvm::CallingConv::ARM_APCS;
5680 case AAPCS: return llvm::CallingConv::ARM_AAPCS;
5681 case AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case AAPCS16_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  }
  llvm_unreachable("bad ABI kind");
}
5687 void ARMABIInfo::setCCs() {
5688 assert(getRuntimeCC() == llvm::CallingConv::C);
5690 // Don't muddy up the IR with a ton of explicit annotations if
5691 // they'd just match what LLVM will infer from the triple.
5692 llvm::CallingConv::ID abiCC = getABIDefaultCC();
  if (abiCC != getLLVMDefaultCC())
    RuntimeCC = abiCC;

  // AAPCS apparently requires runtime support functions to be soft-float, but
  // that's almost certainly for historic reasons (Thumb1 not supporting VFP
  // most likely). It's more convenient for AAPCS16_VFP to be hard-float.
  //
  // The Run-time ABI for the ARM Architecture section 4.1.2 requires
  // AEABI-complying FP helper functions to use the base AAPCS.
  // These AEABI functions are expanded in the ARM llvm backend; all the
  // builtin support functions emitted by clang, such as the _Complex helpers,
  // follow the abiCC.
  if (abiCC != getLLVMDefaultCC())
    BuiltinCC = abiCC;
}
5709 ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
5710 bool isVariadic) const {
5711 // 6.1.2.1 The following argument types are VFP CPRCs:
5712 // A single-precision floating-point type (including promoted
5713 // half-precision types); A double-precision floating-point type;
5714 // A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate
5715 // with a Base Type of a single- or double-precision floating-point type,
5716 // 64-bit containerized vectors or 128-bit containerized vectors with one
5717 // to four Elements.
5718 bool IsEffectivelyAAPCS_VFP = getABIKind() == AAPCS_VFP && !isVariadic;
5720 Ty = useFirstFieldIfTransparentUnion(Ty);
5722 // Handle illegal vector types here.
  if (isIllegalVectorType(Ty)) {
    uint64_t Size = getContext().getTypeSize(Ty);
    if (Size <= 32) {
      llvm::Type *ResType =
          llvm::Type::getInt32Ty(getVMContext());
      return ABIArgInfo::getDirect(ResType);
    }
    if (Size == 64) {
      llvm::Type *ResType = llvm::VectorType::get(
          llvm::Type::getInt32Ty(getVMContext()), 2);
      return ABIArgInfo::getDirect(ResType);
    }
    if (Size == 128) {
      llvm::Type *ResType = llvm::VectorType::get(
          llvm::Type::getInt32Ty(getVMContext()), 4);
      return ABIArgInfo::getDirect(ResType);
    }
    return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
  }
5743 // __fp16 gets passed as if it were an int or float, but with the top 16 bits
5744 // unspecified. This is not done for OpenCL as it handles the half type
5745 // natively, and does not need to interwork with AAPCS code.
5746 if (Ty->isHalfType() && !getContext().getLangOpts().NativeHalfArgsAndReturns) {
5747 llvm::Type *ResType = IsEffectivelyAAPCS_VFP ?
5748 llvm::Type::getFloatTy(getVMContext()) :
5749 llvm::Type::getInt32Ty(getVMContext());
    return ABIArgInfo::getDirect(ResType);
  }
5753 if (!isAggregateTypeForABI(Ty)) {
5754 // Treat an enum type as its underlying type.
5755 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
      Ty = EnumTy->getDecl()->getIntegerType();
    }

    return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend()
                                          : ABIArgInfo::getDirect());
  }
5763 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
    return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
  }
5767 // Ignore empty records.
5768 if (isEmptyRecord(getContext(), Ty, true))
5769 return ABIArgInfo::getIgnore();
5771 if (IsEffectivelyAAPCS_VFP) {
5772 // Homogeneous Aggregates need to be expanded when we can fit the aggregate
5773 // into VFP registers.
5774 const Type *Base = nullptr;
5775 uint64_t Members = 0;
5776 if (isHomogeneousAggregate(Ty, Base, Members)) {
5777 assert(Base && "Base class should be set for homogeneous aggregate");
5778 // Base can be a floating-point or a vector.
      return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
    }
  } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
5782 // WatchOS does have homogeneous aggregates. Note that we intentionally use
    // this convention even for a variadic function: the backend will use GPRs
    // if needed.
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (isHomogeneousAggregate(Ty, Base, Members)) {
      assert(Base && Members <= 4 && "unexpected homogeneous aggregate");
      llvm::Type *Ty =
          llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members);
      return ABIArgInfo::getDirect(Ty, 0, nullptr, false);
    }
  }
5795 if (getABIKind() == ARMABIInfo::AAPCS16_VFP &&
5796 getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(16)) {
5797 // WatchOS is adopting the 64-bit AAPCS rule on composite types: if they're
5798 // bigger than 128-bits, they get placed in space allocated by the caller,
5799 // and a pointer is passed.
5800 return ABIArgInfo::getIndirect(
        CharUnits::fromQuantity(getContext().getTypeAlign(Ty) / 8), false);
  }
5804 // Support byval for ARM.
5805 // The ABI alignment for APCS is 4-byte and for AAPCS at least 4-byte and at
5806 // most 8-byte. We realign the indirect argument if type alignment is bigger
5807 // than ABI alignment.
5808 uint64_t ABIAlign = 4;
5809 uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8;
5810 if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
5811 getABIKind() == ARMABIInfo::AAPCS)
5812 ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
5814 if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) {
5815 assert(getABIKind() != ARMABIInfo::AAPCS16_VFP && "unexpected byval");
    return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
                                   /*ByVal=*/true,
                                   /*Realign=*/TyAlign > ABIAlign);
  }
5821 // On RenderScript, coerce Aggregates <= 64 bytes to an integer array of
5822 // same size and alignment.
5823 if (getTarget().isRenderScriptTarget()) {
    return coerceToIntArray(Ty, getContext(), getVMContext());
  }

  // Otherwise, pass by coercing to a structure of the appropriate size.
  llvm::Type* ElemTy;
  unsigned SizeRegs;
  // FIXME: Try to match the types of the arguments more accurately where
  // possible.
  if (getContext().getTypeAlign(Ty) <= 32) {
    ElemTy = llvm::Type::getInt32Ty(getVMContext());
    SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
  } else {
    ElemTy = llvm::Type::getInt64Ty(getVMContext());
    SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
  }

  return ABIArgInfo::getDirect(llvm::ArrayType::get(ElemTy, SizeRegs));
}
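// Editor's note -- a worked example, not from the original source: a 6-byte
// struct such as
//   struct S3 { short a, b, c; };   // 48 bits, 16-bit alignment
// takes the 32-bit branch, so SizeRegs = (48 + 31) / 32 = 2 and the argument
// is coerced to [2 x i32]; a struct with a 64-bit-aligned member would use
// the i64 branch instead.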
5843 static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
5844 llvm::LLVMContext &VMContext) {
5845 // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
5846 // is called integer-like if its size is less than or equal to one word, and
5847 // the offset of each of its addressable sub-fields is zero.
5849 uint64_t Size = Context.getTypeSize(Ty);
  // Check that the type fits in a word.
  if (Size > 32)
    return false;

  // FIXME: Handle vector types!
  if (Ty->isVectorType())
    return false;

  // Float types are never treated as "integer like".
  if (Ty->isRealFloatingType())
    return false;

  // If this is a builtin or pointer type then it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
    return true;
5867 // Small complex integer types are "integer like".
5868 if (const ComplexType *CT = Ty->getAs<ComplexType>())
5869 return isIntegerLikeType(CT->getElementType(), Context, VMContext);
5871 // Single element and zero sized arrays should be allowed, by the definition
5872 // above, but they are not.
5874 // Otherwise, it must be a record type.
5875 const RecordType *RT = Ty->getAs<RecordType>();
5876 if (!RT) return false;
5878 // Ignore records with flexible arrays.
5879 const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // Check that all sub-fields are at offset 0, and are themselves "integer
  // like".
  const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

  bool HadField = false;
  unsigned idx = 0;
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i, ++idx) {
5891 const FieldDecl *FD = *i;
5893 // Bit-fields are not addressable, we only need to verify they are "integer
5894 // like". We still have to disallow a subsequent non-bitfield, for example:
    //   struct { int : 0; int x; }
    // is non-integer like according to gcc.
    if (FD->isBitField()) {
      if (!RD->isUnion())
        HadField = true;

      if (!isIntegerLikeType(FD->getType(), Context, VMContext))
        return false;

      continue;
    }

    // Check if this field is at offset 0.
    if (Layout.getFieldOffset(idx) != 0)
      return false;

    if (!isIntegerLikeType(FD->getType(), Context, VMContext))
      return false;

    // Only allow at most one field in a structure. This doesn't match the
    // wording above, but follows gcc in situations with a field following an
    // empty structure.
    if (!RD->isUnion()) {
      if (HadField)
        return false;

      HadField = true;
    }
  }

  return true;
}
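// Editor's note -- illustrative cases for the predicate above, not from the
// original source:
//   struct A { int x; };          // one word, field at offset 0: integer-like
//   struct B { short a, b; };     // 'b' is at a non-zero offset: rejected
//   union  U { int x; char c; };  // all members at offset 0: integer-like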
5928 ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
5929 bool isVariadic) const {
5930 bool IsEffectivelyAAPCS_VFP =
5931 (getABIKind() == AAPCS_VFP || getABIKind() == AAPCS16_VFP) && !isVariadic;
5933 if (RetTy->isVoidType())
5934 return ABIArgInfo::getIgnore();
5936 // Large vector types should be returned via memory.
5937 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) {
    return getNaturalAlignIndirect(RetTy);
  }
5941 // __fp16 gets returned as if it were an int or float, but with the top 16
5942 // bits unspecified. This is not done for OpenCL as it handles the half type
5943 // natively, and does not need to interwork with AAPCS code.
5944 if (RetTy->isHalfType() && !getContext().getLangOpts().NativeHalfArgsAndReturns) {
5945 llvm::Type *ResType = IsEffectivelyAAPCS_VFP ?
5946 llvm::Type::getFloatTy(getVMContext()) :
5947 llvm::Type::getInt32Ty(getVMContext());
    return ABIArgInfo::getDirect(ResType);
  }
5951 if (!isAggregateTypeForABI(RetTy)) {
5952 // Treat an enum type as its underlying type.
5953 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
5954 RetTy = EnumTy->getDecl()->getIntegerType();
    return RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend()
                                            : ABIArgInfo::getDirect();
  }
5960 // Are we following APCS?
5961 if (getABIKind() == APCS) {
5962 if (isEmptyRecord(getContext(), RetTy, false))
5963 return ABIArgInfo::getIgnore();
    // Complex types are all returned as packed integers.
    //
    // FIXME: Consider using 2 x vector types if the back end handles them
    // correctly.
    if (RetTy->isAnyComplexType())
5970 return ABIArgInfo::getDirect(llvm::IntegerType::get(
5971 getVMContext(), getContext().getTypeSize(RetTy)));
5973 // Integer like structures are returned in r0.
    if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
      // Return in the smallest viable integer type.
      uint64_t Size = getContext().getTypeSize(RetTy);
      if (Size <= 8)
        return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
      if (Size <= 16)
        return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
    }

    // Otherwise return in memory.
    return getNaturalAlignIndirect(RetTy);
  }
5988 // Otherwise this is an AAPCS variant.
5990 if (isEmptyRecord(getContext(), RetTy, true))
5991 return ABIArgInfo::getIgnore();
5993 // Check for homogeneous aggregates with AAPCS-VFP.
5994 if (IsEffectivelyAAPCS_VFP) {
5995 const Type *Base = nullptr;
5996 uint64_t Members = 0;
5997 if (isHomogeneousAggregate(RetTy, Base, Members)) {
5998 assert(Base && "Base class should be set for homogeneous aggregate");
5999 // Homogeneous Aggregates are returned directly.
      return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
    }
  }
6004 // Aggregates <= 4 bytes are returned in r0; other aggregates
6005 // are returned indirectly.
  uint64_t Size = getContext().getTypeSize(RetTy);
  if (Size <= 32) {
    // On RenderScript, coerce Aggregates <= 4 bytes to an integer array of
    // the same size and alignment.
    if (getTarget().isRenderScriptTarget()) {
      return coerceToIntArray(RetTy, getContext(), getVMContext());
    }
    if (getDataLayout().isBigEndian())
      // Return in a 32-bit integer type (as if loaded by LDR; AAPCS 5.4).
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));

    // Return in the smallest viable integer type.
    if (Size <= 8)
      return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
    if (Size <= 16)
      return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
    return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
  } else if (Size <= 128 && getABIKind() == AAPCS16_VFP) {
    llvm::Type *Int32Ty = llvm::Type::getInt32Ty(getVMContext());
    llvm::Type *CoerceTy =
        llvm::ArrayType::get(Int32Ty, llvm::alignTo(Size, 32) / 32);
    return ABIArgInfo::getDirect(CoerceTy);
  }

  return getNaturalAlignIndirect(RetTy);
}
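// Editor's note -- a worked example, not from the original source: under
// AAPCS, struct { char a, b; } (16 bits) is returned as i16 on little-endian
// targets but as i32 on big-endian ones (as if loaded by LDR), while a
// 12-byte non-homogeneous struct falls through to memory.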
/// isIllegalVectorType - check whether Ty is an illegal vector type.
6034 bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    if (getTarget().getTriple().isAndroid()) {
      // Android shipped using Clang 3.1, which supported a slightly different
      // vector ABI. The primary differences were that 3-element vector types
      // were legal, and so were sub 32-bit vectors (i.e. <2 x i8>). This path
      // accepts that legacy behavior for Android only.
      // Check whether VT is legal.
      unsigned NumElements = VT->getNumElements();
      // NumElements should be power of 2 or equal to 3.
      if (!llvm::isPowerOf2_32(NumElements) && NumElements != 3)
        return true;
    } else {
      // Check whether VT is legal.
      unsigned NumElements = VT->getNumElements();
      uint64_t Size = getContext().getTypeSize(VT);
      // NumElements should be power of 2.
      if (!llvm::isPowerOf2_32(NumElements))
        return true;
      // Size should be greater than 32 bits.
      return Size <= 32;
    }
  }
  return false;
}
bool ARMABIInfo::isLegalVectorTypeForSwift(CharUnits vectorSize,
                                           llvm::Type *eltTy,
                                           unsigned numElts) const {
  if (!llvm::isPowerOf2_32(numElts))
    return false;
  unsigned size = getDataLayout().getTypeStoreSizeInBits(eltTy);
  if (size > 64)
    return false;
  if (vectorSize.getQuantity() != 8 &&
      (vectorSize.getQuantity() != 16 || numElts == 1))
    return false;
  return true;
}
6074 bool ARMABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
6075 // Homogeneous aggregates for AAPCS-VFP must have base types of float,
6076 // double, or 64-bit or 128-bit vectors.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->getKind() == BuiltinType::Float ||
        BT->getKind() == BuiltinType::Double ||
        BT->getKind() == BuiltinType::LongDouble)
      return true;
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    unsigned VecSize = getContext().getTypeSize(VT);
    if (VecSize == 64 || VecSize == 128)
      return true;
  }
  return false;
}
6090 bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
6091 uint64_t Members) const {
  return Members <= 4;
}
6095 Address ARMABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6096 QualType Ty) const {
6097 CharUnits SlotSize = CharUnits::fromQuantity(4);
6099 // Empty records are ignored for parameter passing purposes.
6100 if (isEmptyRecord(getContext(), Ty, true)) {
    Address Addr(CGF.Builder.CreateLoad(VAListAddr), SlotSize);
    Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
    return Addr;
  }
6106 auto TyInfo = getContext().getTypeInfoInChars(Ty);
6107 CharUnits TyAlignForABI = TyInfo.second;
6109 // Use indirect if size of the illegal vector is bigger than 16 bytes.
6110 bool IsIndirect = false;
6111 const Type *Base = nullptr;
6112 uint64_t Members = 0;
  if (TyInfo.first > CharUnits::fromQuantity(16) && isIllegalVectorType(Ty)) {
    IsIndirect = true;

  // ARMv7k passes structs bigger than 16 bytes indirectly, in space
  // allocated by the caller.
  } else if (TyInfo.first > CharUnits::fromQuantity(16) &&
             getABIKind() == ARMABIInfo::AAPCS16_VFP &&
             !isHomogeneousAggregate(Ty, Base, Members)) {
    IsIndirect = true;
6123 // Otherwise, bound the type's ABI alignment.
6124 // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for
6125 // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte.
6126 // Our callers should be prepared to handle an under-aligned address.
6127 } else if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
6128 getABIKind() == ARMABIInfo::AAPCS) {
6129 TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
6130 TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(8));
6131 } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
6132 // ARMv7k allows type alignment up to 16 bytes.
6133 TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
    TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(16));
  } else {
    TyAlignForABI = CharUnits::fromQuantity(4);
  }
  TyInfo.second = TyAlignForABI;
  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo,
                          SlotSize, /*AllowHigherAlign*/ true);
}
6144 //===----------------------------------------------------------------------===//
6145 // NVPTX ABI Implementation
6146 //===----------------------------------------------------------------------===//
class NVPTXABIInfo : public ABIInfo {
public:
  NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
6154 ABIArgInfo classifyReturnType(QualType RetTy) const;
6155 ABIArgInfo classifyArgumentType(QualType Ty) const;
6157 void computeInfo(CGFunctionInfo &FI) const override;
6158 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
};
class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  NVPTXTargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {}
6167 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
6168 CodeGen::CodeGenModule &M,
                           ForDefinition_t IsForDefinition) const override;

private:
6172 // Adds a NamedMDNode with F, Name, and Operand as operands, and adds the
6173 // resulting MDNode to the nvvm.annotations MDNode.
  static void addNVVMMetadata(llvm::Function *F, StringRef Name, int Operand);
};
6177 ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
6178 if (RetTy->isVoidType())
6179 return ABIArgInfo::getIgnore();
  // Note: this is different from the default ABI.
6182 if (!RetTy->isScalarType())
6183 return ABIArgInfo::getDirect();
6185 // Treat an enum type as its underlying type.
6186 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
6187 RetTy = EnumTy->getDecl()->getIntegerType();
  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
6193 ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
6194 // Treat an enum type as its underlying type.
6195 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
6196 Ty = EnumTy->getDecl()->getIntegerType();
  // Return aggregate types as indirect by value.
6199 if (isAggregateTypeForABI(Ty))
6200 return getNaturalAlignIndirect(Ty, /* byval */ true);
  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
6206 void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
6207 if (!getCXXABI().classifyReturnType(FI))
6208 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
6209 for (auto &I : FI.arguments())
6210 I.info = classifyArgumentType(I.type);
6212 // Always honor user-specified calling convention.
  if (FI.getCallingConvention() != llvm::CallingConv::C)
    return;

  FI.setEffectiveCallingConvention(getRuntimeCC());
}
6219 Address NVPTXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6220 QualType Ty) const {
  llvm_unreachable("NVPTX does not support varargs");
}
6224 void NVPTXTargetCodeGenInfo::setTargetAttributes(
6225 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M,
6226 ForDefinition_t IsForDefinition) const {
  if (!IsForDefinition)
    return;
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
  if (!FD)
    return;

  llvm::Function *F = cast<llvm::Function>(GV);
6234 // Perform special handling in OpenCL mode
6235 if (M.getLangOpts().OpenCL) {
6236 // Use OpenCL function attributes to check for kernel functions
6237 // By default, all functions are device functions
6238 if (FD->hasAttr<OpenCLKernelAttr>()) {
6239 // OpenCL __kernel functions get kernel metadata
6240 // Create !{<func-ref>, metadata !"kernel", i32 1} node
6241 addNVVMMetadata(F, "kernel", 1);
6242 // And kernel functions are not subject to inlining
      F->addFnAttr(llvm::Attribute::NoInline);
    }
  }
6247 // Perform special handling in CUDA mode.
6248 if (M.getLangOpts().CUDA) {
6249 // CUDA __global__ functions get a kernel metadata entry. Since
6250 // __global__ functions cannot be called from the device, we do not
6251 // need to set the noinline attribute.
6252 if (FD->hasAttr<CUDAGlobalAttr>()) {
6253 // Create !{<func-ref>, metadata !"kernel", i32 1} node
      addNVVMMetadata(F, "kernel", 1);
    }
    if (CUDALaunchBoundsAttr *Attr = FD->getAttr<CUDALaunchBoundsAttr>()) {
6257 // Create !{<func-ref>, metadata !"maxntidx", i32 <val>} node
6258 llvm::APSInt MaxThreads(32);
6259 MaxThreads = Attr->getMaxThreads()->EvaluateKnownConstInt(M.getContext());
6261 addNVVMMetadata(F, "maxntidx", MaxThreads.getExtValue());
6263 // min blocks is an optional argument for CUDALaunchBoundsAttr. If it was
6264 // not specified in __launch_bounds__ or if the user specified a 0 value,
6265 // we don't have to add a PTX directive.
6266 if (Attr->getMinBlocks()) {
6267 llvm::APSInt MinBlocks(32);
6268 MinBlocks = Attr->getMinBlocks()->EvaluateKnownConstInt(M.getContext());
6270 // Create !{<func-ref>, metadata !"minctasm", i32 <val>} node
        addNVVMMetadata(F, "minctasm", MinBlocks.getExtValue());
      }
    }
  }
}
void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::Function *F, StringRef Name,
                                             int Operand) {
  llvm::Module *M = F->getParent();
6280 llvm::LLVMContext &Ctx = M->getContext();
6282 // Get "nvvm.annotations" metadata node
6283 llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations");
6285 llvm::Metadata *MDVals[] = {
6286 llvm::ConstantAsMetadata::get(F), llvm::MDString::get(Ctx, Name),
6287 llvm::ConstantAsMetadata::get(
6288 llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))};
6289 // Append metadata to nvvm.annotations
  MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
}
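// Editor's note -- an illustrative sketch of the emitted IR, not from the
// original source. For a CUDA kernel declared as
//   __global__ __launch_bounds__(256, 2) void k(void);
// the handling above produces roughly:
//   !nvvm.annotations = !{!0, !1, !2}
//   !0 = !{void ()* @k, !"kernel", i32 1}
//   !1 = !{void ()* @k, !"maxntidx", i32 256}
//   !2 = !{void ()* @k, !"minctasm", i32 2}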
6294 //===----------------------------------------------------------------------===//
6295 // SystemZ ABI Implementation
6296 //===----------------------------------------------------------------------===//
class SystemZABIInfo : public SwiftABIInfo {
  bool HasVector;

public:
  SystemZABIInfo(CodeGenTypes &CGT, bool HV)
      : SwiftABIInfo(CGT), HasVector(HV) {}
6307 bool isPromotableIntegerType(QualType Ty) const;
6308 bool isCompoundType(QualType Ty) const;
6309 bool isVectorArgumentType(QualType Ty) const;
6310 bool isFPArgumentType(QualType Ty) const;
6311 QualType GetSingleElementType(QualType Ty) const;
6313 ABIArgInfo classifyReturnType(QualType RetTy) const;
6314 ABIArgInfo classifyArgumentType(QualType ArgTy) const;
6316 void computeInfo(CGFunctionInfo &FI) const override {
6317 if (!getCXXABI().classifyReturnType(FI))
6318 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
6319 for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type);
  }
6323 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6324 QualType Ty) const override;
6326 bool shouldPassIndirectlyForSwift(CharUnits totalSize,
6327 ArrayRef<llvm::Type*> scalars,
6328 bool asReturnValue) const override {
    return occupiesMoreThan(CGT, scalars, /*total*/ 4);
  }
  bool isSwiftErrorInRegister() const override {
    return false;
  }
};
class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SystemZTargetCodeGenInfo(CodeGenTypes &CGT, bool HasVector)
      : TargetCodeGenInfo(new SystemZABIInfo(CGT, HasVector)) {}
};
6344 bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const {
6345 // Treat an enum type as its underlying type.
6346 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
6347 Ty = EnumTy->getDecl()->getIntegerType();
6349 // Promotable integer types are required to be promoted by the ABI.
  if (Ty->isPromotableIntegerType())
    return true;

  // 32-bit values must also be promoted.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
    switch (BT->getKind()) {
    case BuiltinType::Int:
    case BuiltinType::UInt:
      return true;
    default:
      return false;
    }
  return false;
}
6365 bool SystemZABIInfo::isCompoundType(QualType Ty) const {
6366 return (Ty->isAnyComplexType() ||
6367 Ty->isVectorType() ||
          isAggregateTypeForABI(Ty));
}
6371 bool SystemZABIInfo::isVectorArgumentType(QualType Ty) const {
6372 return (HasVector &&
6373 Ty->isVectorType() &&
          getContext().getTypeSize(Ty) <= 128);
}
6377 bool SystemZABIInfo::isFPArgumentType(QualType Ty) const {
6378 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
6379 switch (BT->getKind()) {
    case BuiltinType::Float:
    case BuiltinType::Double:
      return true;
    default:
      return false;
    }

  return false;
}
6390 QualType SystemZABIInfo::GetSingleElementType(QualType Ty) const {
6391 if (const RecordType *RT = Ty->getAsStructureType()) {
    const RecordDecl *RD = RT->getDecl();
    QualType Found;

    // If this is a C++ record, check the bases first.
6396 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
6397 for (const auto &I : CXXRD->bases()) {
        QualType Base = I.getType();

        // Empty bases don't affect things either way.
        if (isEmptyRecord(getContext(), Base, true))
          continue;

        if (!Found.isNull())
          return Ty;
        Found = GetSingleElementType(Base);
      }
6409 // Check the fields.
6410 for (const auto *FD : RD->fields()) {
6411 // For compatibility with GCC, ignore empty bitfields in C++ mode.
6412 // Unlike isSingleElementStruct(), empty structure and array fields
6413 // do count. So do anonymous bitfields that aren't zero-sized.
      if (getContext().getLangOpts().CPlusPlus &&
          FD->isBitField() && FD->getBitWidthValue(getContext()) == 0)
        continue;
6418 // Unlike isSingleElementStruct(), arrays do not count.
6419 // Nested structures still do though.
      if (!Found.isNull())
        return Ty;
      Found = GetSingleElementType(FD->getType());
    }
6425 // Unlike isSingleElementStruct(), trailing padding is allowed.
6426 // An 8-byte aligned struct s { float f; } is passed as a double.
    if (!Found.isNull())
      return Found;
  }

  return Ty;
}
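// Editor's note -- illustrative cases, not from the original source:
//   struct S1 { struct { float f; } inner; };  // drills down to 'float'
//   struct S2 { float f; int i; };             // two fields: returns S2 itself
// A single-element float result then qualifies for the FP-register path in
// classifyArgumentType below.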
6434 Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6435 QualType Ty) const {
  // Assume that va_list type is correct; should be pointer to LLVM type:
  // struct {
  //   i64 __gpr;
  //   i64 __fpr;
  //   i8 *__overflow_arg_area;
  //   i8 *__reg_save_area;
  // };
6444 // Every non-vector argument occupies 8 bytes and is passed by preference
6445 // in either GPRs or FPRs. Vector arguments occupy 8 or 16 bytes and are
6446 // always passed on the stack.
6447 Ty = getContext().getCanonicalType(Ty);
6448 auto TyInfo = getContext().getTypeInfoInChars(Ty);
6449 llvm::Type *ArgTy = CGF.ConvertTypeForMem(Ty);
6450 llvm::Type *DirectTy = ArgTy;
6451 ABIArgInfo AI = classifyArgumentType(Ty);
6452 bool IsIndirect = AI.isIndirect();
6453 bool InFPRs = false;
6454 bool IsVector = false;
6455 CharUnits UnpaddedSize;
6456 CharUnits DirectAlign;
  if (IsIndirect) {
    DirectTy = llvm::PointerType::getUnqual(DirectTy);
    UnpaddedSize = DirectAlign = CharUnits::fromQuantity(8);
  } else {
    if (AI.getCoerceToType())
      ArgTy = AI.getCoerceToType();
    InFPRs = ArgTy->isFloatTy() || ArgTy->isDoubleTy();
    IsVector = ArgTy->isVectorTy();
    UnpaddedSize = TyInfo.first;
    DirectAlign = TyInfo.second;
  }
6468 CharUnits PaddedSize = CharUnits::fromQuantity(8);
6469 if (IsVector && UnpaddedSize > PaddedSize)
6470 PaddedSize = CharUnits::fromQuantity(16);
6471 assert((UnpaddedSize <= PaddedSize) && "Invalid argument size.");
6473 CharUnits Padding = (PaddedSize - UnpaddedSize);
6475 llvm::Type *IndexTy = CGF.Int64Ty;
6476 llvm::Value *PaddedSizeV =
      llvm::ConstantInt::get(IndexTy, PaddedSize.getQuantity());

  if (IsVector) {
    // Work out the address of a vector argument on the stack.
6481 // Vector arguments are always passed in the high bits of a
6482 // single (8 byte) or double (16 byte) stack slot.
6483 Address OverflowArgAreaPtr =
6484 CGF.Builder.CreateStructGEP(VAListAddr, 2, CharUnits::fromQuantity(16),
6485 "overflow_arg_area_ptr");
6486 Address OverflowArgArea =
        Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
                TyInfo.second);
    Address MemAddr =
        CGF.Builder.CreateElementBitCast(OverflowArgArea, DirectTy, "mem_addr");
6492 // Update overflow_arg_area_ptr pointer
6493 llvm::Value *NewOverflowArgArea =
6494 CGF.Builder.CreateGEP(OverflowArgArea.getPointer(), PaddedSizeV,
6495 "overflow_arg_area");
    CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
    return MemAddr;
  }

  assert(PaddedSize.getQuantity() == 8);
6503 unsigned MaxRegs, RegCountField, RegSaveIndex;
  CharUnits RegPadding;
  if (InFPRs) {
    MaxRegs = 4;              // Maximum of 4 FPR arguments
    RegCountField = 1;        // __fpr
    RegSaveIndex = 16;        // save offset for f0
    RegPadding = CharUnits(); // floats are passed in the high bits of an FPR
  } else {
    MaxRegs = 5;              // Maximum of 5 GPR arguments
    RegCountField = 0;        // __gpr
    RegSaveIndex = 2;         // save offset for r2
    RegPadding = Padding;     // values are passed in the low bits of a GPR
  }
  Address RegCountPtr = CGF.Builder.CreateStructGEP(
      VAListAddr, RegCountField, RegCountField * CharUnits::fromQuantity(8),
      "reg_count_ptr");
  llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count");
6521 llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
  llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV,
                                                  "fits_in_regs");

  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
6526 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
6527 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
6528 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
6530 // Emit code to load the value if it was passed in registers.
6531 CGF.EmitBlock(InRegBlock);
6533 // Work out the address of an argument register.
6534 llvm::Value *ScaledRegCount =
6535 CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count");
6536 llvm::Value *RegBase =
6537 llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize.getQuantity()
6538 + RegPadding.getQuantity());
6539 llvm::Value *RegOffset =
6540 CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset");
6541 Address RegSaveAreaPtr =
6542 CGF.Builder.CreateStructGEP(VAListAddr, 3, CharUnits::fromQuantity(24),
6543 "reg_save_area_ptr");
6544 llvm::Value *RegSaveArea =
6545 CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area");
  Address RawRegAddr(CGF.Builder.CreateGEP(RegSaveArea, RegOffset,
                                           "raw_reg_addr"),
                     PaddedSize);
  Address RegAddr =
      CGF.Builder.CreateElementBitCast(RawRegAddr, DirectTy, "reg_addr");
6552 // Update the register count
6553 llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1);
6554 llvm::Value *NewRegCount =
6555 CGF.Builder.CreateAdd(RegCount, One, "reg_count");
6556 CGF.Builder.CreateStore(NewRegCount, RegCountPtr);
6557 CGF.EmitBranch(ContBlock);
6559 // Emit code to load the value if it was passed in memory.
6560 CGF.EmitBlock(InMemBlock);
6562 // Work out the address of a stack argument.
6563 Address OverflowArgAreaPtr = CGF.Builder.CreateStructGEP(
6564 VAListAddr, 2, CharUnits::fromQuantity(16), "overflow_arg_area_ptr");
  Address OverflowArgArea =
      Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
              PaddedSize);
  Address RawMemAddr =
      CGF.Builder.CreateConstByteGEP(OverflowArgArea, Padding, "raw_mem_addr");
  Address MemAddr =
      CGF.Builder.CreateElementBitCast(RawMemAddr, DirectTy, "mem_addr");
6573 // Update overflow_arg_area_ptr pointer
6574 llvm::Value *NewOverflowArgArea =
6575 CGF.Builder.CreateGEP(OverflowArgArea.getPointer(), PaddedSizeV,
6576 "overflow_arg_area");
6577 CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
6578 CGF.EmitBranch(ContBlock);
6580 // Return the appropriate result.
6581 CGF.EmitBlock(ContBlock);
6582 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock,
                                 MemAddr, InMemBlock, "va_arg.addr");

  if (IsIndirect)
    ResAddr = Address(CGF.Builder.CreateLoad(ResAddr, "indirect_arg"),
                      TyInfo.second);

  return ResAddr;
}
6592 ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
6593 if (RetTy->isVoidType())
6594 return ABIArgInfo::getIgnore();
6595 if (isVectorArgumentType(RetTy))
6596 return ABIArgInfo::getDirect();
6597 if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64)
6598 return getNaturalAlignIndirect(RetTy);
  return (isPromotableIntegerType(RetTy) ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
6603 ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
6604 // Handle the generic C++ ABI.
6605 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
6606 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
6608 // Integers and enums are extended to full register width.
6609 if (isPromotableIntegerType(Ty))
6610 return ABIArgInfo::getExtend();
6612 // Handle vector types and vector-like structure types. Note that
6613 // as opposed to float-like structure types, we do not allow any
6614 // padding for vector-like structures, so verify the sizes match.
6615 uint64_t Size = getContext().getTypeSize(Ty);
6616 QualType SingleElementTy = GetSingleElementType(Ty);
6617 if (isVectorArgumentType(SingleElementTy) &&
6618 getContext().getTypeSize(SingleElementTy) == Size)
6619 return ABIArgInfo::getDirect(CGT.ConvertType(SingleElementTy));
6621 // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly.
6622 if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
6623 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
6625 // Handle small structures.
6626 if (const RecordType *RT = Ty->getAs<RecordType>()) {
    // Structures with flexible arrays have variable length, so they really
    // fail the size test above.
6629 const RecordDecl *RD = RT->getDecl();
6630 if (RD->hasFlexibleArrayMember())
6631 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
    // The structure is passed as an unextended integer, a float, or a double.
    llvm::Type *PassTy;
    if (isFPArgumentType(SingleElementTy)) {
      assert(Size == 32 || Size == 64);
      if (Size == 32)
        PassTy = llvm::Type::getFloatTy(getVMContext());
      else
        PassTy = llvm::Type::getDoubleTy(getVMContext());
    } else
      PassTy = llvm::IntegerType::get(getVMContext(), Size);
    return ABIArgInfo::getDirect(PassTy);
  }
6646 // Non-structure compounds are passed indirectly.
6647 if (isCompoundType(Ty))
6648 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
  return ABIArgInfo::getDirect(nullptr);
}
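// Editor's note -- illustrative cases for the rules above, not from the
// original source:
//   struct F { float f; };    // 4 bytes, single FP element: passed as float
//   struct I { short a, b; }; // 4 bytes, no single FP element: passed as i32
//   struct T { char c[3]; };  // 3 bytes: not 1/2/4/8, passed indirectly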
6653 //===----------------------------------------------------------------------===//
6654 // MSP430 ABI Implementation
6655 //===----------------------------------------------------------------------===//
class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  MSP430TargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
6663 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
6664 CodeGen::CodeGenModule &M,
                           ForDefinition_t IsForDefinition) const override;
};
6670 void MSP430TargetCodeGenInfo::setTargetAttributes(
6671 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M,
6672 ForDefinition_t IsForDefinition) const {
  if (!IsForDefinition)
    return;
6675 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
6676 if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) {
6677 // Handle 'interrupt' attribute:
6678 llvm::Function *F = cast<llvm::Function>(GV);
6680 // Step 1: Set ISR calling convention.
6681 F->setCallingConv(llvm::CallingConv::MSP430_INTR);
6683 // Step 2: Add attributes goodness.
6684 F->addFnAttr(llvm::Attribute::NoInline);
6686 // Step 3: Emit ISR vector alias.
6687 unsigned Num = attr->getNumber() / 2;
6688 llvm::GlobalAlias::create(llvm::Function::ExternalLinkage,
6689 "__isr_" + Twine(Num), F);
6694 //===----------------------------------------------------------------------===//
6695 // MIPS ABI Implementation. This works for both little-endian and
6696 // big-endian variants.
6697 //===----------------------------------------------------------------------===//
class MipsABIInfo : public ABIInfo {
  bool IsO32;
  unsigned MinABIStackAlignInBytes, StackAlignInBytes;
6703 void CoerceToIntArgs(uint64_t TySize,
6704 SmallVectorImpl<llvm::Type *> &ArgList) const;
6705 llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const;
6706 llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const;
  llvm::Type* getPaddingType(uint64_t OrigOffset, uint64_t Offset) const;

public:
  MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) :
6710 ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
6711 StackAlignInBytes(IsO32 ? 8 : 16) {}
6713 ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty, uint64_t &Offset) const;
6715 void computeInfo(CGFunctionInfo &FI) const override;
6716 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6717 QualType Ty) const override;
  bool shouldSignExtUnsignedType(QualType Ty) const override;
};
class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
  unsigned SizeOfUnwindException;
public:
  MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32)
6725 : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)),
6726 SizeOfUnwindException(IsO32 ? 24 : 32) {}
  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    return 29;
  }
6732 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
6733 CodeGen::CodeGenModule &CGM,
6734 ForDefinition_t IsForDefinition) const override {
    const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD) return;
    llvm::Function *Fn = cast<llvm::Function>(GV);
6739 if (FD->hasAttr<MipsLongCallAttr>())
6740 Fn->addFnAttr("long-call");
6741 else if (FD->hasAttr<MipsShortCallAttr>())
6742 Fn->addFnAttr("short-call");
6744 // Other attributes do not have a meaning for declarations.
    if (!IsForDefinition)
      return;
6748 if (FD->hasAttr<Mips16Attr>()) {
      Fn->addFnAttr("mips16");
    }
    else if (FD->hasAttr<NoMips16Attr>()) {
      Fn->addFnAttr("nomips16");
    }
6755 if (FD->hasAttr<MicroMipsAttr>())
6756 Fn->addFnAttr("micromips");
6757 else if (FD->hasAttr<NoMicroMipsAttr>())
6758 Fn->addFnAttr("nomicromips");
    const MipsInterruptAttr *Attr = FD->getAttr<MipsInterruptAttr>();
    if (!Attr)
      return;

    const char *Kind;
    switch (Attr->getInterrupt()) {
6766 case MipsInterruptAttr::eic: Kind = "eic"; break;
6767 case MipsInterruptAttr::sw0: Kind = "sw0"; break;
6768 case MipsInterruptAttr::sw1: Kind = "sw1"; break;
6769 case MipsInterruptAttr::hw0: Kind = "hw0"; break;
6770 case MipsInterruptAttr::hw1: Kind = "hw1"; break;
6771 case MipsInterruptAttr::hw2: Kind = "hw2"; break;
6772 case MipsInterruptAttr::hw3: Kind = "hw3"; break;
6773 case MipsInterruptAttr::hw4: Kind = "hw4"; break;
6774 case MipsInterruptAttr::hw5: Kind = "hw5"; break;
6777 Fn->addFnAttr("interrupt", Kind);
6781 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
6782 llvm::Value *Address) const override;
6784 unsigned getSizeOfUnwindException() const override {
6785 return SizeOfUnwindException;
6790 void MipsABIInfo::CoerceToIntArgs(
6791 uint64_t TySize, SmallVectorImpl<llvm::Type *> &ArgList) const {
6792 llvm::IntegerType *IntTy =
6793 llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);
  // Add (TySize / (MinABIStackAlignInBytes * 8)) args of IntTy.
6796 for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
6797 ArgList.push_back(IntTy);
6799 // If necessary, add one more integer type to ArgList.
  unsigned R = TySize % (MinABIStackAlignInBytes * 8);
  if (R)
    ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
}
// In N32/64, an aligned double-precision floating-point field is passed in
// a register.
6808 llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const {
  SmallVector<llvm::Type*, 8> ArgList, IntArgList;

  if (IsO32) {
    CoerceToIntArgs(TySize, ArgList);
    return llvm::StructType::get(getVMContext(), ArgList);
  }
6816 if (Ty->isComplexType())
6817 return CGT.ConvertType(Ty);
6819 const RecordType *RT = Ty->getAs<RecordType>();
6821 // Unions/vectors are passed in integer registers.
6822 if (!RT || !RT->isStructureOrClassType()) {
6823 CoerceToIntArgs(TySize, ArgList);
6824 return llvm::StructType::get(getVMContext(), ArgList);
6827 const RecordDecl *RD = RT->getDecl();
6828 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
6829 assert(!(TySize % 8) && "Size of structure must be multiple of 8.");
  uint64_t LastOffset = 0;
  unsigned idx = 0;
6833 llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64);
  // Iterate over fields in the struct/class and check if there are any
  // aligned double fields.
6837 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
6838 i != e; ++i, ++idx) {
6839 const QualType Ty = i->getType();
6840 const BuiltinType *BT = Ty->getAs<BuiltinType>();
    if (!BT || BT->getKind() != BuiltinType::Double)
      continue;
6845 uint64_t Offset = Layout.getFieldOffset(idx);
    if (Offset % 64) // Ignore doubles that are not aligned.
      continue;
6849 // Add ((Offset - LastOffset) / 64) args of type i64.
6850 for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
6851 ArgList.push_back(I64);
6854 ArgList.push_back(llvm::Type::getDoubleTy(getVMContext()));
6855 LastOffset = Offset + 64;
6858 CoerceToIntArgs(TySize - LastOffset, IntArgList);
6859 ArgList.append(IntArgList.begin(), IntArgList.end());
  return llvm::StructType::get(getVMContext(), ArgList);
}
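// For example (an illustrative sketch): on N32/N64, a type such as
//
//   struct S { double d; int i; };   // d at offset 0, i at offset 64
//
// is coerced to the literal struct type { double, i64 }, so the aligned
// double travels in a floating-point register while the trailing int is
// passed in an integer register.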
6864 llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset,
6865 uint64_t Offset) const {
  if (OrigOffset + MinABIStackAlignInBytes > Offset)
    return nullptr;

  return llvm::IntegerType::get(getVMContext(), (Offset - OrigOffset) * 8);
}
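// For example: with MinABIStackAlignInBytes == 8, an argument whose natural
// position (OrigOffset) is 8 but whose aligned slot (Offset) starts at 16
// gets an i64 padding type; if the two offsets are less than one slot apart,
// no padding type is needed and nullptr is returned.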
6873 MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
6874 Ty = useFirstFieldIfTransparentUnion(Ty);
6876 uint64_t OrigOffset = Offset;
6877 uint64_t TySize = getContext().getTypeSize(Ty);
6878 uint64_t Align = getContext().getTypeAlign(Ty) / 8;
6880 Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes),
6881 (uint64_t)StackAlignInBytes);
6882 unsigned CurrOffset = llvm::alignTo(Offset, Align);
6883 Offset = CurrOffset + llvm::alignTo(TySize, Align * 8) / 8;
6885 if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) {
    // Ignore empty aggregates.
    if (TySize == 0)
      return ABIArgInfo::getIgnore();
6890 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
6891 Offset = OrigOffset + MinABIStackAlignInBytes;
6892 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
6895 // If we have reached here, aggregates are passed directly by coercing to
6896 // another structure type. Padding is inserted if the offset of the
6897 // aggregate is unaligned.
6898 ABIArgInfo ArgInfo =
6899 ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0,
6900 getPaddingType(OrigOffset, CurrOffset));
    ArgInfo.setInReg(true);
    return ArgInfo;
  }
6905 // Treat an enum type as its underlying type.
6906 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
6907 Ty = EnumTy->getDecl()->getIntegerType();
6909 // All integral types are promoted to the GPR width.
6910 if (Ty->isIntegralOrEnumerationType())
6911 return ABIArgInfo::getExtend();
6913 return ABIArgInfo::getDirect(
      nullptr, 0, IsO32 ? nullptr : getPaddingType(OrigOffset, CurrOffset));
}

llvm::Type*
MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
6919 const RecordType *RT = RetTy->getAs<RecordType>();
6920 SmallVector<llvm::Type*, 8> RTList;
6922 if (RT && RT->isStructureOrClassType()) {
6923 const RecordDecl *RD = RT->getDecl();
6924 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
6925 unsigned FieldCnt = Layout.getFieldCount();
6927 // N32/64 returns struct/classes in floating point registers if the
6928 // following conditions are met:
6929 // 1. The size of the struct/class is no larger than 128-bit.
  // 2. The struct/class has one or two fields all of which are floating
  //    point types.
6932 // 3. The offset of the first field is zero (this follows what gcc does).
6934 // Any other composite results are returned in integer registers.
6936 if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) {
6937 RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end();
6938 for (; b != e; ++b) {
6939 const BuiltinType *BT = b->getType()->getAs<BuiltinType>();
        if (!BT || !BT->isFloatingPoint())
          break;

        RTList.push_back(CGT.ConvertType(b->getType()));
      }

      if (b == e)
        return llvm::StructType::get(getVMContext(), RTList,
                                     RD->hasAttr<PackedAttr>());

      RTList.clear();
    }
  }

  CoerceToIntArgs(Size, RTList);
  return llvm::StructType::get(getVMContext(), RTList);
}
6959 ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
6960 uint64_t Size = getContext().getTypeSize(RetTy);
6962 if (RetTy->isVoidType())
6963 return ABIArgInfo::getIgnore();
6965 // O32 doesn't treat zero-sized structs differently from other structs.
6966 // However, N32/N64 ignores zero sized return values.
6967 if (!IsO32 && Size == 0)
6968 return ABIArgInfo::getIgnore();
  if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) {
    if (Size <= 128) {
6972 if (RetTy->isAnyComplexType())
6973 return ABIArgInfo::getDirect();
6975 // O32 returns integer vectors in registers and N32/N64 returns all small
6976 // aggregates in registers.
      if (!IsO32 ||
          (RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())) {
6979 ABIArgInfo ArgInfo =
6980 ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
        ArgInfo.setInReg(true);
        return ArgInfo;
      }
    }

    return getNaturalAlignIndirect(RetTy);
  }
6989 // Treat an enum type as its underlying type.
6990 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
6991 RetTy = EnumTy->getDecl()->getIntegerType();
6993 return (RetTy->isPromotableIntegerType() ?
6994 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
6997 void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
6998 ABIArgInfo &RetInfo = FI.getReturnInfo();
6999 if (!getCXXABI().classifyReturnType(FI))
7000 RetInfo = classifyReturnType(FI.getReturnType());
7002 // Check if a pointer to an aggregate is passed as a hidden argument.
7003 uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0;
7005 for (auto &I : FI.arguments())
7006 I.info = classifyArgumentType(I.type, Offset);
7009 Address MipsABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
7010 QualType OrigTy) const {
7011 QualType Ty = OrigTy;
7013 // Integer arguments are promoted to 32-bit on O32 and 64-bit on N32/N64.
7014 // Pointers are also promoted in the same way but this only matters for N32.
7015 unsigned SlotSizeInBits = IsO32 ? 32 : 64;
7016 unsigned PtrWidth = getTarget().getPointerWidth(0);
7017 bool DidPromote = false;
7018 if ((Ty->isIntegerType() &&
7019 getContext().getIntWidth(Ty) < SlotSizeInBits) ||
      (Ty->isPointerType() && PtrWidth < SlotSizeInBits)) {
    DidPromote = true;
    Ty = getContext().getIntTypeForBitwidth(SlotSizeInBits,
                                            Ty->isSignedIntegerType());
  }
7026 auto TyInfo = getContext().getTypeInfoInChars(Ty);
7028 // The alignment of things in the argument area is never larger than
7029 // StackAlignInBytes.
  TyInfo.second =
      std::min(TyInfo.second, CharUnits::fromQuantity(StackAlignInBytes));
7033 // MinABIStackAlignInBytes is the size of argument slots on the stack.
7034 CharUnits ArgSlotSize = CharUnits::fromQuantity(MinABIStackAlignInBytes);
7036 Address Addr = emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
7037 TyInfo, ArgSlotSize, /*AllowHigherAlign*/ true);
  // If there was a promotion, "unpromote" into a temporary.
  // TODO: can we just use a pointer into a subset of the original slot?
  if (DidPromote) {
    Address Temp = CGF.CreateMemTemp(OrigTy, "vaarg.promotion-temp");
    llvm::Value *Promoted = CGF.Builder.CreateLoad(Addr);

    // Truncate down to the right width.
    llvm::Type *IntTy = (OrigTy->isIntegerType() ? Temp.getElementType()
                                                 : CGF.IntPtrTy);
    llvm::Value *V = CGF.Builder.CreateTrunc(Promoted, IntTy);
    if (OrigTy->isPointerType())
      V = CGF.Builder.CreateIntToPtr(V, Temp.getElementType());

    CGF.Builder.CreateStore(V, Temp);

    Addr = Temp;
  }

  return Addr;
}
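// For example: on N64 (64-bit slots), lowering
//
//   int x = va_arg(ap, int);
//
// reads the whole promoted 64-bit slot and truncates it back to i32 in the
// "vaarg.promotion-temp" temporary, because the caller widened the value
// when passing it.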
7060 bool MipsABIInfo::shouldSignExtUnsignedType(QualType Ty) const {
7061 int TySize = getContext().getTypeSize(Ty);
7063 // MIPS64 ABI requires unsigned 32 bit integers to be sign extended.
  if (Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
    return true;

  return false;
}
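// For example: a parameter declared as `unsigned x` is given the signext
// (not zeroext) attribute in the IR when targeting MIPS64, since the N32/N64
// ABIs keep 32-bit values sign-extended in 64-bit registers regardless of
// signedness.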
7071 MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
7072 llvm::Value *Address) const {
  // This information comes from gcc's implementation, which seems to
  // be as canonical as it gets.
7076 // Everything on MIPS is 4 bytes. Double-precision FP registers
7077 // are aliased to pairs of single-precision FP registers.
7078 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
7080 // 0-31 are the general purpose registers, $0 - $31.
7081 // 32-63 are the floating-point registers, $f0 - $f31.
7082 // 64 and 65 are the multiply/divide registers, $hi and $lo.
7083 // 66 is the (notional, I think) register for signal-handler return.
7084 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);
7086 // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
7087 // They are one bit wide and ignored here.
7089 // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
7090 // (coprocessor 1 is the FP unit)
7091 // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
7092 // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
7093 // 176-181 are the DSP accumulator registers.
  AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);

  return false;
}
7098 //===----------------------------------------------------------------------===//
7099 // AVR ABI Implementation.
7100 //===----------------------------------------------------------------------===//
7103 class AVRTargetCodeGenInfo : public TargetCodeGenInfo {
7105 AVRTargetCodeGenInfo(CodeGenTypes &CGT)
7106 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) { }
7108 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
7109 CodeGen::CodeGenModule &CGM,
7110 ForDefinition_t IsForDefinition) const override {
    if (!IsForDefinition)
      return;

    const auto *FD = dyn_cast_or_null<FunctionDecl>(D);
    if (!FD) return;

    auto *Fn = cast<llvm::Function>(GV);
7117 if (FD->getAttr<AVRInterruptAttr>())
7118 Fn->addFnAttr("interrupt");
7120 if (FD->getAttr<AVRSignalAttr>())
7121 Fn->addFnAttr("signal");
7126 //===----------------------------------------------------------------------===//
7127 // TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
// Currently subclassed only to implement custom OpenCL C function attribute
// handling.
7130 //===----------------------------------------------------------------------===//
7134 class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo {
7136 TCETargetCodeGenInfo(CodeGenTypes &CGT)
7137 : DefaultTargetCodeGenInfo(CGT) {}
7139 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
7140 CodeGen::CodeGenModule &M,
7141 ForDefinition_t IsForDefinition) const override;
7144 void TCETargetCodeGenInfo::setTargetAttributes(
7145 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M,
7146 ForDefinition_t IsForDefinition) const {
  if (!IsForDefinition)
    return;

  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
  if (!FD)
    return;

  llvm::Function *F = cast<llvm::Function>(GV);
7154 if (M.getLangOpts().OpenCL) {
7155 if (FD->hasAttr<OpenCLKernelAttr>()) {
7156 // OpenCL C Kernel functions are not subject to inlining
7157 F->addFnAttr(llvm::Attribute::NoInline);
      const ReqdWorkGroupSizeAttr *Attr = FD->getAttr<ReqdWorkGroupSizeAttr>();
      if (Attr) {
        // Convert the reqd_work_group_size() attributes to metadata.
        llvm::LLVMContext &Context = F->getContext();
        llvm::NamedMDNode *OpenCLMetadata =
            M.getModule().getOrInsertNamedMetadata(
                "opencl.kernel_wg_size_info");

        SmallVector<llvm::Metadata *, 5> Operands;
        Operands.push_back(llvm::ConstantAsMetadata::get(F));

        Operands.push_back(
            llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
                M.Int32Ty, llvm::APInt(32, Attr->getXDim()))));
        Operands.push_back(
            llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
                M.Int32Ty, llvm::APInt(32, Attr->getYDim()))));
        Operands.push_back(
            llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
                M.Int32Ty, llvm::APInt(32, Attr->getZDim()))));

        // Add a boolean constant operand for "required" (true) or "hint"
        // (false) for implementing the work_group_size_hint attr later.
        // Currently always true as the hint is not yet implemented.
        Operands.push_back(
            llvm::ConstantAsMetadata::get(llvm::ConstantInt::getTrue(Context)));
        OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
      }
    }
  }
}
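// For example (illustrative IR): a kernel declared with
// __attribute__((reqd_work_group_size(4, 4, 1))) produces metadata roughly
// of the form:
//
//   !opencl.kernel_wg_size_info = !{!0}
//   !0 = !{void ()* @kernel, i32 4, i32 4, i32 1, i1 true}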
7192 //===----------------------------------------------------------------------===//
7193 // Hexagon ABI Implementation
7194 //===----------------------------------------------------------------------===//
7198 class HexagonABIInfo : public ABIInfo {
7202 HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
7206 ABIArgInfo classifyReturnType(QualType RetTy) const;
7207 ABIArgInfo classifyArgumentType(QualType RetTy) const;
7209 void computeInfo(CGFunctionInfo &FI) const override;
7211 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
7212 QualType Ty) const override;
7215 class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
7217 HexagonTargetCodeGenInfo(CodeGenTypes &CGT)
7218 :TargetCodeGenInfo(new HexagonABIInfo(CGT)) {}
  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    // r29 is the Hexagon stack pointer.
    return 29;
  }
};
7227 void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
7228 if (!getCXXABI().classifyReturnType(FI))
7229 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
7230 for (auto &I : FI.arguments())
7231 I.info = classifyArgumentType(I.type);
7234 ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const {
7235 if (!isAggregateTypeForABI(Ty)) {
7236 // Treat an enum type as its underlying type.
7237 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
7238 Ty = EnumTy->getDecl()->getIntegerType();
7240 return (Ty->isPromotableIntegerType() ?
7241 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
7244 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
7245 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
7247 // Ignore empty records.
7248 if (isEmptyRecord(getContext(), Ty, true))
7249 return ABIArgInfo::getIgnore();
  uint64_t Size = getContext().getTypeSize(Ty);
  if (Size > 64)
    return getNaturalAlignIndirect(Ty, /*ByVal=*/true);

  // Pass in the smallest viable integer type.
  if (Size > 32)
    return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
  if (Size > 16)
    return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
  if (Size > 8)
    return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
  return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
}
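// For example: a 6-byte struct (48 bits) is passed directly as an i64 and a
// 3-byte struct as an i32, while a 12-byte struct exceeds the 64-bit limit
// and is passed indirectly (byval) on the stack.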
7265 ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
7266 if (RetTy->isVoidType())
7267 return ABIArgInfo::getIgnore();
7269 // Large vector types should be returned via memory.
7270 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64)
7271 return getNaturalAlignIndirect(RetTy);
7273 if (!isAggregateTypeForABI(RetTy)) {
7274 // Treat an enum type as its underlying type.
7275 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
7276 RetTy = EnumTy->getDecl()->getIntegerType();
7278 return (RetTy->isPromotableIntegerType() ?
7279 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
7282 if (isEmptyRecord(getContext(), RetTy, true))
7283 return ABIArgInfo::getIgnore();
7285 // Aggregates <= 8 bytes are returned in r0; other aggregates
7286 // are returned indirectly.
  uint64_t Size = getContext().getTypeSize(RetTy);
  if (Size <= 64) {
    // Return in the smallest viable integer type.
    if (Size <= 8)
      return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
    if (Size <= 16)
      return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
    if (Size <= 32)
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
    return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
  }

  return getNaturalAlignIndirect(RetTy, /*ByVal=*/true);
}
7302 Address HexagonABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
7303 QualType Ty) const {
  // FIXME: Someone needs to audit whether this handles alignment correctly.
7305 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
7306 getContext().getTypeInfoInChars(Ty),
7307 CharUnits::fromQuantity(4),
7308 /*AllowHigherAlign*/ true);
7311 //===----------------------------------------------------------------------===//
7312 // Lanai ABI Implementation
7313 //===----------------------------------------------------------------------===//
7316 class LanaiABIInfo : public DefaultABIInfo {
7318 LanaiABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
7320 bool shouldUseInReg(QualType Ty, CCState &State) const;
7322 void computeInfo(CGFunctionInfo &FI) const override {
7323 CCState State(FI.getCallingConvention());
7324 // Lanai uses 4 registers to pass arguments unless the function has the
7325 // regparm attribute set.
    if (FI.getHasRegParm()) {
      State.FreeRegs = FI.getRegParm();
    } else {
      State.FreeRegs = 4;
    }
7332 if (!getCXXABI().classifyReturnType(FI))
7333 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
7334 for (auto &I : FI.arguments())
7335 I.info = classifyArgumentType(I.type, State);
7338 ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;
7339 ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const;
7341 } // end anonymous namespace
7343 bool LanaiABIInfo::shouldUseInReg(QualType Ty, CCState &State) const {
7344 unsigned Size = getContext().getTypeSize(Ty);
7345 unsigned SizeInRegs = llvm::alignTo(Size, 32U) / 32U;
  if (SizeInRegs == 0)
    return false;

  if (SizeInRegs > State.FreeRegs) {
    State.FreeRegs = 0;
    return false;
  }

  State.FreeRegs -= SizeInRegs;

  return true;
}
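// For example: with all 4 argument registers free, an 8-byte value
// (SizeInRegs == 2) fits and leaves two registers for later arguments,
// whereas a 24-byte value (SizeInRegs == 6) does not fit and zeroes
// FreeRegs, so subsequent arguments are not marked inreg either.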
7360 ABIArgInfo LanaiABIInfo::getIndirectResult(QualType Ty, bool ByVal,
7361 CCState &State) const {
  if (!ByVal) {
    if (State.FreeRegs) {
      --State.FreeRegs; // Non-byval indirects just use one pointer.
      return getNaturalAlignIndirectInReg(Ty);
    }
    return getNaturalAlignIndirect(Ty, false);
  }
7370 // Compute the byval alignment.
7371 const unsigned MinABIStackAlignInBytes = 4;
7372 unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
7373 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true,
7374 /*Realign=*/TypeAlign >
7375 MinABIStackAlignInBytes);
7378 ABIArgInfo LanaiABIInfo::classifyArgumentType(QualType Ty,
7379 CCState &State) const {
7380 // Check with the C++ ABI first.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (RT) {
    CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
    if (RAA == CGCXXABI::RAA_Indirect) {
      return getIndirectResult(Ty, /*ByVal=*/false, State);
    } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
      return getNaturalAlignIndirect(Ty, /*ByRef=*/true);
    }
  }
7391 if (isAggregateTypeForABI(Ty)) {
7392 // Structures with flexible arrays are always indirect.
7393 if (RT && RT->getDecl()->hasFlexibleArrayMember())
7394 return getIndirectResult(Ty, /*ByVal=*/true, State);
7396 // Ignore empty structs/unions.
7397 if (isEmptyRecord(getContext(), Ty, true))
7398 return ABIArgInfo::getIgnore();
7400 llvm::LLVMContext &LLVMContext = getVMContext();
7401 unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
7402 if (SizeInRegs <= State.FreeRegs) {
7403 llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
7404 SmallVector<llvm::Type *, 3> Elements(SizeInRegs, Int32);
7405 llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
7406 State.FreeRegs -= SizeInRegs;
      return ABIArgInfo::getDirectInReg(Result);
    } else {
      State.FreeRegs = 0;
    }

    return getIndirectResult(Ty, true, State);
  }
7414 // Treat an enum type as its underlying type.
7415 if (const auto *EnumTy = Ty->getAs<EnumType>())
7416 Ty = EnumTy->getDecl()->getIntegerType();
7418 bool InReg = shouldUseInReg(Ty, State);
  if (Ty->isPromotableIntegerType()) {
    if (InReg)
      return ABIArgInfo::getDirectInReg();
    return ABIArgInfo::getExtend();
  }

  if (InReg)
    return ABIArgInfo::getDirectInReg();
  return ABIArgInfo::getDirect();
}
7430 class LanaiTargetCodeGenInfo : public TargetCodeGenInfo {
7432 LanaiTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
7433 : TargetCodeGenInfo(new LanaiABIInfo(CGT)) {}
7437 //===----------------------------------------------------------------------===//
7438 // AMDGPU ABI Implementation
7439 //===----------------------------------------------------------------------===//
7443 class AMDGPUABIInfo final : public DefaultABIInfo {
7445 static const unsigned MaxNumRegsForArgsRet = 16;
7447 unsigned numRegsForType(QualType Ty) const;
7449 bool isHomogeneousAggregateBaseType(QualType Ty) const override;
7450 bool isHomogeneousAggregateSmallEnough(const Type *Base,
7451 uint64_t Members) const override;
7454 explicit AMDGPUABIInfo(CodeGen::CodeGenTypes &CGT) :
7455 DefaultABIInfo(CGT) {}
7457 ABIArgInfo classifyReturnType(QualType RetTy) const;
7458 ABIArgInfo classifyKernelArgumentType(QualType Ty) const;
7459 ABIArgInfo classifyArgumentType(QualType Ty, unsigned &NumRegsLeft) const;
7461 void computeInfo(CGFunctionInfo &FI) const override;
7464 bool AMDGPUABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
7468 bool AMDGPUABIInfo::isHomogeneousAggregateSmallEnough(
7469 const Type *Base, uint64_t Members) const {
7470 uint32_t NumRegs = (getContext().getTypeSize(Base) + 31) / 32;
7472 // Homogeneous Aggregates may occupy at most 16 registers.
7473 return Members * NumRegs <= MaxNumRegsForArgsRet;
7476 /// Estimate number of registers the type will use when passed in registers.
7477 unsigned AMDGPUABIInfo::numRegsForType(QualType Ty) const {
7478 unsigned NumRegs = 0;
7480 if (const VectorType *VT = Ty->getAs<VectorType>()) {
7481 // Compute from the number of elements. The reported size is based on the
7482 // in-memory size, which includes the padding 4th element for 3-vectors.
7483 QualType EltTy = VT->getElementType();
7484 unsigned EltSize = getContext().getTypeSize(EltTy);
    // 16-bit element vectors should be passed as packed.
    if (EltSize == 16)
      return (VT->getNumElements() + 1) / 2;
7490 unsigned EltNumRegs = (EltSize + 31) / 32;
7491 return EltNumRegs * VT->getNumElements();
7494 if (const RecordType *RT = Ty->getAs<RecordType>()) {
7495 const RecordDecl *RD = RT->getDecl();
7496 assert(!RD->hasFlexibleArrayMember());
7498 for (const FieldDecl *Field : RD->fields()) {
7499 QualType FieldTy = Field->getType();
7500 NumRegs += numRegsForType(FieldTy);
    }

    return NumRegs;
  }

  return (getContext().getTypeSize(Ty) + 31) / 32;
}
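// For example: a <4 x float> vector counts as 4 registers, a <4 x half>
// packs two 16-bit elements per register and counts as 2, and a
// struct { float a, b; } is summed field by field to 2.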
7509 void AMDGPUABIInfo::computeInfo(CGFunctionInfo &FI) const {
7510 llvm::CallingConv::ID CC = FI.getCallingConvention();
7512 if (!getCXXABI().classifyReturnType(FI))
7513 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
7515 unsigned NumRegsLeft = MaxNumRegsForArgsRet;
7516 for (auto &Arg : FI.arguments()) {
7517 if (CC == llvm::CallingConv::AMDGPU_KERNEL) {
7518 Arg.info = classifyKernelArgumentType(Arg.type);
7520 Arg.info = classifyArgumentType(Arg.type, NumRegsLeft);
7525 ABIArgInfo AMDGPUABIInfo::classifyReturnType(QualType RetTy) const {
7526 if (isAggregateTypeForABI(RetTy)) {
7527 // Records with non-trivial destructors/copy-constructors should not be
7528 // returned by value.
7529 if (!getRecordArgABI(RetTy, getCXXABI())) {
7530 // Ignore empty structs/unions.
7531 if (isEmptyRecord(getContext(), RetTy, true))
7532 return ABIArgInfo::getIgnore();
7534 // Lower single-element structs to just return a regular value.
7535 if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
7536 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
7538 if (const RecordType *RT = RetTy->getAs<RecordType>()) {
7539 const RecordDecl *RD = RT->getDecl();
7540 if (RD->hasFlexibleArrayMember())
7541 return DefaultABIInfo::classifyReturnType(RetTy);
      // Pack aggregates <= 8 bytes into a single VGPR or pair.
      uint64_t Size = getContext().getTypeSize(RetTy);
      if (Size <= 16)
        return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));

      if (Size <= 32)
        return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));

      if (Size <= 64) {
        llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext());
        return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2));
      }
7557 if (numRegsForType(RetTy) <= MaxNumRegsForArgsRet)
7558 return ABIArgInfo::getDirect();
7562 // Otherwise just do the default thing.
7563 return DefaultABIInfo::classifyReturnType(RetTy);
7566 /// For kernels all parameters are really passed in a special buffer. It doesn't
7567 /// make sense to pass anything byval, so everything must be direct.
7568 ABIArgInfo AMDGPUABIInfo::classifyKernelArgumentType(QualType Ty) const {
7569 Ty = useFirstFieldIfTransparentUnion(Ty);
7571 // TODO: Can we omit empty structs?
7573 // Coerce single element structs to its element.
7574 if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
7575 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
7577 // If we set CanBeFlattened to true, CodeGen will expand the struct to its
7578 // individual elements, which confuses the Clover OpenCL backend; therefore we
7579 // have to set it to false here. Other args of getDirect() are just defaults.
7580 return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
7583 ABIArgInfo AMDGPUABIInfo::classifyArgumentType(QualType Ty,
7584 unsigned &NumRegsLeft) const {
7585 assert(NumRegsLeft <= MaxNumRegsForArgsRet && "register estimate underflow");
7587 Ty = useFirstFieldIfTransparentUnion(Ty);
7589 if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // passed by value.
    if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
7593 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
7595 // Ignore empty structs/unions.
7596 if (isEmptyRecord(getContext(), Ty, true))
7597 return ABIArgInfo::getIgnore();
7599 // Lower single-element structs to just pass a regular value. TODO: We
7600 // could do reasonable-size multiple-element structs too, using getExpand(),
7601 // though watch out for things like bitfields.
7602 if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
7603 return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
7605 if (const RecordType *RT = Ty->getAs<RecordType>()) {
7606 const RecordDecl *RD = RT->getDecl();
7607 if (RD->hasFlexibleArrayMember())
7608 return DefaultABIInfo::classifyArgumentType(Ty);
    // Pack aggregates <= 8 bytes into single VGPR or pair.
    uint64_t Size = getContext().getTypeSize(Ty);
    if (Size <= 64) {
      unsigned NumRegs = (Size + 31) / 32;
      NumRegsLeft -= std::min(NumRegsLeft, NumRegs);

      if (Size <= 16)
        return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));

      if (Size <= 32)
        return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));

      // XXX: Should this be i64 instead, and should the limit increase?
      llvm::Type *I32Ty = llvm::Type::getInt32Ty(getVMContext());
      return ABIArgInfo::getDirect(llvm::ArrayType::get(I32Ty, 2));
    }
    if (NumRegsLeft > 0) {
      unsigned NumRegs = numRegsForType(Ty);
      if (NumRegsLeft >= NumRegs) {
        NumRegsLeft -= NumRegs;
        return ABIArgInfo::getDirect();
      }
    }
  }
7637 // Otherwise just do the default thing.
7638 ABIArgInfo ArgInfo = DefaultABIInfo::classifyArgumentType(Ty);
7639 if (!ArgInfo.isIndirect()) {
7640 unsigned NumRegs = numRegsForType(Ty);
    NumRegsLeft -= std::min(NumRegs, NumRegsLeft);
  }

  return ArgInfo;
}
7647 class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo {
7649 AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT)
7650 : TargetCodeGenInfo(new AMDGPUABIInfo(CGT)) {}
7651 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
7652 CodeGen::CodeGenModule &M,
7653 ForDefinition_t IsForDefinition) const override;
7654 unsigned getOpenCLKernelCallingConv() const override;
7656 llvm::Constant *getNullPointer(const CodeGen::CodeGenModule &CGM,
7657 llvm::PointerType *T, QualType QT) const override;
7659 LangAS getASTAllocaAddressSpace() const override {
7660 return getLangASFromTargetAS(
7661 getABIInfo().getDataLayout().getAllocaAddrSpace());
7663 LangAS getGlobalVarAddressSpace(CodeGenModule &CGM,
7664 const VarDecl *D) const override;
7665 llvm::SyncScope::ID getLLVMSyncScopeID(SyncScope S,
7666 llvm::LLVMContext &C) const override;
7668 createEnqueuedBlockKernel(CodeGenFunction &CGF,
7669 llvm::Function *BlockInvokeFunc,
7670 llvm::Value *BlockLiteral) const override;
7674 void AMDGPUTargetCodeGenInfo::setTargetAttributes(
7675 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M,
7676 ForDefinition_t IsForDefinition) const {
  if (!IsForDefinition)
    return;

  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
  if (!FD)
    return;

  llvm::Function *F = cast<llvm::Function>(GV);
7685 const auto *ReqdWGS = M.getLangOpts().OpenCL ?
7686 FD->getAttr<ReqdWorkGroupSizeAttr>() : nullptr;
7687 const auto *FlatWGS = FD->getAttr<AMDGPUFlatWorkGroupSizeAttr>();
7688 if (ReqdWGS || FlatWGS) {
7689 unsigned Min = FlatWGS ? FlatWGS->getMin() : 0;
7690 unsigned Max = FlatWGS ? FlatWGS->getMax() : 0;
7691 if (ReqdWGS && Min == 0 && Max == 0)
7692 Min = Max = ReqdWGS->getXDim() * ReqdWGS->getYDim() * ReqdWGS->getZDim();
      if (Min != 0) {
        assert(Min <= Max && "Min must be less than or equal Max");

        std::string AttrVal = llvm::utostr(Min) + "," + llvm::utostr(Max);
        F->addFnAttr("amdgpu-flat-work-group-size", AttrVal);
      } else
        assert(Max == 0 && "Max must be zero");
    }
7703 if (const auto *Attr = FD->getAttr<AMDGPUWavesPerEUAttr>()) {
7704 unsigned Min = Attr->getMin();
7705 unsigned Max = Attr->getMax();
    if (Min != 0) {
      assert((Max == 0 || Min <= Max) && "Min must be less than or equal Max");

      std::string AttrVal = llvm::utostr(Min);
      if (Max != 0)
        AttrVal = AttrVal + "," + llvm::utostr(Max);
      F->addFnAttr("amdgpu-waves-per-eu", AttrVal);
    } else
      assert(Max == 0 && "Max must be zero");
  }
7718 if (const auto *Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) {
    unsigned NumSGPR = Attr->getNumSGPR();

    if (NumSGPR != 0)
      F->addFnAttr("amdgpu-num-sgpr", llvm::utostr(NumSGPR));
  }
7725 if (const auto *Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) {
    uint32_t NumVGPR = Attr->getNumVGPR();

    if (NumVGPR != 0)
      F->addFnAttr("amdgpu-num-vgpr", llvm::utostr(NumVGPR));
  }
}
7733 unsigned AMDGPUTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
7734 return llvm::CallingConv::AMDGPU_KERNEL;
7737 // Currently LLVM assumes null pointers always have value 0,
7738 // which results in incorrectly transformed IR. Therefore, instead of
7739 // emitting null pointers in private and local address spaces, a null
// pointer in the generic address space is emitted and then addrspacecast to
// the local or private address space.
7742 llvm::Constant *AMDGPUTargetCodeGenInfo::getNullPointer(
7743 const CodeGen::CodeGenModule &CGM, llvm::PointerType *PT,
7744 QualType QT) const {
7745 if (CGM.getContext().getTargetNullPointerValue(QT) == 0)
7746 return llvm::ConstantPointerNull::get(PT);
7748 auto &Ctx = CGM.getContext();
7749 auto NPT = llvm::PointerType::get(PT->getElementType(),
7750 Ctx.getTargetAddressSpace(LangAS::opencl_generic));
  return llvm::ConstantExpr::getAddrSpaceCast(
      llvm::ConstantPointerNull::get(NPT), PT);
}
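// For example (an illustrative sketch): a null pointer in an address space
// whose null value is not the bit pattern 0 is emitted as
//
//   addrspacecast (i8* null to i8 addrspace(N)*)
//
// i.e. a generic null cast into the target address space N, rather than as
// a zero-valued pointer in that space.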
7756 AMDGPUTargetCodeGenInfo::getGlobalVarAddressSpace(CodeGenModule &CGM,
7757 const VarDecl *D) const {
7758 assert(!CGM.getLangOpts().OpenCL &&
7759 !(CGM.getLangOpts().CUDA && CGM.getLangOpts().CUDAIsDevice) &&
7760 "Address space agnostic languages only");
7761 LangAS DefaultGlobalAS = getLangASFromTargetAS(
7762 CGM.getContext().getTargetAddressSpace(LangAS::opencl_global));
  if (!D)
    return DefaultGlobalAS;
7766 LangAS AddrSpace = D->getType().getAddressSpace();
7767 assert(AddrSpace == LangAS::Default || isTargetAddressSpace(AddrSpace));
  if (AddrSpace != LangAS::Default)
    return AddrSpace;
  if (CGM.isTypeConstant(D->getType(), false)) {
    if (auto ConstAS = CGM.getTarget().getConstantAddressSpace())
      return ConstAS.getValue();
  }

  return DefaultGlobalAS;
}
7779 AMDGPUTargetCodeGenInfo::getLLVMSyncScopeID(SyncScope S,
7780 llvm::LLVMContext &C) const {
  // Map the OpenCL scope to the target's sync-scope name.
  StringRef Name;
  switch (S) {
  case SyncScope::OpenCLWorkGroup:
    Name = "workgroup";
    break;
  case SyncScope::OpenCLDevice:
    Name = "agent";
    break;
  case SyncScope::OpenCLAllSVMDevices:
    Name = "";
    break;
  case SyncScope::OpenCLSubGroup:
    Name = "subgroup";
    break;
  }
  return C.getOrInsertSyncScopeID(Name);
}
7798 //===----------------------------------------------------------------------===//
7799 // SPARC v8 ABI Implementation.
7800 // Based on the SPARC Compliance Definition version 2.4.1.
7802 // Ensures that complex values are passed in registers.
7805 class SparcV8ABIInfo : public DefaultABIInfo {
7807 SparcV8ABIInfo(CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
7810 ABIArgInfo classifyReturnType(QualType RetTy) const;
7811 void computeInfo(CGFunctionInfo &FI) const override;
7813 } // end anonymous namespace
7817 SparcV8ABIInfo::classifyReturnType(QualType Ty) const {
7818 if (Ty->isAnyComplexType()) {
7819 return ABIArgInfo::getDirect();
7822 return DefaultABIInfo::classifyReturnType(Ty);
7826 void SparcV8ABIInfo::computeInfo(CGFunctionInfo &FI) const {
7828 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
7829 for (auto &Arg : FI.arguments())
7830 Arg.info = classifyArgumentType(Arg.type);
7834 class SparcV8TargetCodeGenInfo : public TargetCodeGenInfo {
7836 SparcV8TargetCodeGenInfo(CodeGenTypes &CGT)
7837 : TargetCodeGenInfo(new SparcV8ABIInfo(CGT)) {}
7839 } // end anonymous namespace
7841 //===----------------------------------------------------------------------===//
7842 // SPARC v9 ABI Implementation.
7843 // Based on the SPARC Compliance Definition version 2.4.1.
// Function arguments are mapped to a nominal "parameter array" and promoted to
7846 // registers depending on their type. Each argument occupies 8 or 16 bytes in
7847 // the array, structs larger than 16 bytes are passed indirectly.
// One case requires special care:
//
//   struct mixed {
//     int i;
//     float f;
//   };
//
// When a struct mixed is passed by value, it only occupies 8 bytes in the
7857 // parameter array, but the int is passed in an integer register, and the float
7858 // is passed in a floating point register. This is represented as two arguments
7859 // with the LLVM IR inreg attribute:
7861 // declare void f(i32 inreg %i, float inreg %f)
7863 // The code generator will only allocate 4 bytes from the parameter array for
// the inreg arguments. All other arguments are allocated a multiple of 8
// bytes.
7868 class SparcV9ABIInfo : public ABIInfo {
7870 SparcV9ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
7873 ABIArgInfo classifyType(QualType RetTy, unsigned SizeLimit) const;
7874 void computeInfo(CGFunctionInfo &FI) const override;
7875 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
7876 QualType Ty) const override;
7878 // Coercion type builder for structs passed in registers. The coercion type
7879 // serves two purposes:
7881 // 1. Pad structs to a multiple of 64 bits, so they are passed 'left-aligned'
7883 // 2. Expose aligned floating point elements as first-level elements, so the
7884 // code generator knows to pass them in floating point registers.
7886 // We also compute the InReg flag which indicates that the struct contains
7887 // aligned 32-bit floats.
7889 struct CoerceBuilder {
7890 llvm::LLVMContext &Context;
7891 const llvm::DataLayout &DL;
    SmallVector<llvm::Type*, 8> Elems;
    uint64_t Size;
    bool InReg;
7896 CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl)
7897 : Context(c), DL(dl), Size(0), InReg(false) {}
7899 // Pad Elems with integers until Size is ToSize.
    void pad(uint64_t ToSize) {
      assert(ToSize >= Size && "Cannot remove elements");
      if (ToSize == Size)
        return;

      // Finish the current 64-bit word.
      uint64_t Aligned = llvm::alignTo(Size, 64);
      if (Aligned > Size && Aligned <= ToSize) {
        Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size));
        Size = Aligned;
      }

      // Add whole 64-bit words.
      while (Size + 64 <= ToSize) {
        Elems.push_back(llvm::Type::getInt64Ty(Context));
        Size += 64;
      }

      // Final in-word padding.
      if (Size < ToSize) {
        Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size));
        Size = ToSize;
      }
    }
7925 // Add a floating point element at Offset.
    void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) {
      // Unaligned floats are treated as integers.
      if (Offset % Bits)
        return;
      // The InReg flag is only required if there are any floats < 64 bits.
      if (Bits < 64)
        InReg = true;
      pad(Offset);
      Elems.push_back(Ty);
      Size = Offset + Bits;
    }
7938 // Add a struct type to the coercion type, starting at Offset (in bits).
7939 void addStruct(uint64_t Offset, llvm::StructType *StrTy) {
7940 const llvm::StructLayout *Layout = DL.getStructLayout(StrTy);
7941 for (unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) {
7942 llvm::Type *ElemTy = StrTy->getElementType(i);
7943 uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i);
        switch (ElemTy->getTypeID()) {
        case llvm::Type::StructTyID:
          addStruct(ElemOffset, cast<llvm::StructType>(ElemTy));
          break;
        case llvm::Type::FloatTyID:
          addFloat(ElemOffset, ElemTy, 32);
          break;
        case llvm::Type::DoubleTyID:
          addFloat(ElemOffset, ElemTy, 64);
          break;
        case llvm::Type::FP128TyID:
          addFloat(ElemOffset, ElemTy, 128);
          break;
        case llvm::Type::PointerTyID:
          if (ElemOffset % 64 == 0) {
            pad(ElemOffset);
            Elems.push_back(ElemTy);
            Size = ElemOffset + 64;
          }
          break;
        default:
          break;
        }
      }
    }
7970 // Check if Ty is a usable substitute for the coercion type.
7971 bool isUsableType(llvm::StructType *Ty) const {
7972 return llvm::makeArrayRef(Elems) == Ty->elements();
7975 // Get the coercion type as a literal struct type.
7976 llvm::Type *getType() const {
7977 if (Elems.size() == 1)
7978 return Elems.front();
7980 return llvm::StructType::get(Context, Elems);
7984 } // end anonymous namespace
7987 SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
7988 if (Ty->isVoidType())
7989 return ABIArgInfo::getIgnore();
7991 uint64_t Size = getContext().getTypeSize(Ty);
7993 // Anything too big to fit in registers is passed with an explicit indirect
7994 // pointer / sret pointer.
7995 if (Size > SizeLimit)
7996 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
7998 // Treat an enum type as its underlying type.
7999 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
8000 Ty = EnumTy->getDecl()->getIntegerType();
8002 // Integer types smaller than a register are extended.
8003 if (Size < 64 && Ty->isIntegerType())
8004 return ABIArgInfo::getExtend();
8006 // Other non-aggregates go in registers.
8007 if (!isAggregateTypeForABI(Ty))
8008 return ABIArgInfo::getDirect();
8010 // If a C++ object has either a non-trivial copy constructor or a non-trivial
8011 // destructor, it is passed with an explicit indirect pointer / sret pointer.
8012 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
8013 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
8015 // This is a small aggregate type that should be passed in registers.
8016 // Build a coercion type from the LLVM struct type.
8017 llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty));
8019 return ABIArgInfo::getDirect();
8021 CoerceBuilder CB(getVMContext(), getDataLayout());
8022 CB.addStruct(0, StrTy);
8023 CB.pad(llvm::alignTo(CB.DL.getTypeSizeInBits(StrTy), 64));
8025 // Try to use the original type for coercion.
8026 llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType();
  if (CB.InReg)
    return ABIArgInfo::getDirectInReg(CoerceTy);
  else
    return ABIArgInfo::getDirect(CoerceTy);
}
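// For example (an illustrative sketch): a C type such as
//
//   struct bits { int i; float f; };
//
// is coerced to the literal type { i32, float } with the InReg flag set
// (it contains an aligned 32-bit float), so the float member is passed in
// a floating-point register.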
8034 Address SparcV9ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
8035 QualType Ty) const {
8036 ABIArgInfo AI = classifyType(Ty, 16 * 8);
8037 llvm::Type *ArgTy = CGT.ConvertType(Ty);
8038 if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
8039 AI.setCoerceToType(ArgTy);
8041 CharUnits SlotSize = CharUnits::fromQuantity(8);
8043 CGBuilderTy &Builder = CGF.Builder;
8044 Address Addr(Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize);
8045 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
8047 auto TypeInfo = getContext().getTypeInfoInChars(Ty);
  Address ArgAddr = Address::invalid();
  CharUnits Stride;
  switch (AI.getKind()) {
  case ABIArgInfo::Expand:
  case ABIArgInfo::CoerceAndExpand:
  case ABIArgInfo::InAlloca:
    llvm_unreachable("Unsupported ABI kind for va_arg");

  case ABIArgInfo::Extend: {
    Stride = SlotSize;
    CharUnits Offset = SlotSize - TypeInfo.first;
    ArgAddr = Builder.CreateConstInBoundsByteGEP(Addr, Offset, "extend");
    break;
  }

  case ABIArgInfo::Direct: {
    auto AllocSize = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
    Stride = CharUnits::fromQuantity(AllocSize).alignTo(SlotSize);
    ArgAddr = Addr;
    break;
  }

  case ABIArgInfo::Indirect:
    Stride = SlotSize;
    ArgAddr = Builder.CreateElementBitCast(Addr, ArgPtrTy, "indirect");
    ArgAddr = Address(Builder.CreateLoad(ArgAddr, "indirect.arg"),
                      TypeInfo.second);
    break;

  case ABIArgInfo::Ignore:
    return Address(llvm::UndefValue::get(ArgPtrTy), TypeInfo.second);
  }
8083 llvm::Value *NextPtr =
8084 Builder.CreateConstInBoundsByteGEP(Addr.getPointer(), Stride, "ap.next");
8085 Builder.CreateStore(NextPtr, VAListAddr);
8087 return Builder.CreateBitCast(ArgAddr, ArgPtrTy, "arg.addr");
8090 void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const {
8091 FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8);
8092 for (auto &I : FI.arguments())
8093 I.info = classifyType(I.type, 16 * 8);
8097 class SparcV9TargetCodeGenInfo : public TargetCodeGenInfo {
8099 SparcV9TargetCodeGenInfo(CodeGenTypes &CGT)
8100 : TargetCodeGenInfo(new SparcV9ABIInfo(CGT)) {}
  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    // %o6 (the SPARC stack pointer) is register 14.
    return 14;
  }
8106 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
8107 llvm::Value *Address) const override;
8109 } // end anonymous namespace
8112 SparcV9TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
8113 llvm::Value *Address) const {
8114 // This is calculated from the LLVM and GCC tables and verified
8115 // against gcc output. AFAIK all ABIs use the same encoding.
8117 CodeGen::CGBuilderTy &Builder = CGF.Builder;
8119 llvm::IntegerType *i8 = CGF.Int8Ty;
8120 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
8121 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
8123 // 0-31: the 8-byte general-purpose registers
8124 AssignToArrayRange(Builder, Address, Eight8, 0, 31);
8126 // 32-63: f0-31, the 4-byte floating-point registers
8127 AssignToArrayRange(Builder, Address, Four8, 32, 63);
8137 AssignToArrayRange(Builder, Address, Eight8, 64, 71);
8139 // 72-87: d0-15, the 8-byte floating-point registers
  AssignToArrayRange(Builder, Address, Eight8, 72, 87);

  return false;
}
8146 //===----------------------------------------------------------------------===//
8147 // XCore ABI Implementation
8148 //===----------------------------------------------------------------------===//
8152 /// A SmallStringEnc instance is used to build up the TypeString by passing
8153 /// it by reference between functions that append to it.
8154 typedef llvm::SmallString<128> SmallStringEnc;
8156 /// TypeStringCache caches the meta encodings of Types.
8158 /// The reason for caching TypeStrings is two fold:
8159 /// 1. To cache a type's encoding for later uses;
8160 /// 2. As a means to break recursive member type inclusion.
8162 /// A cache Entry can have a Status of:
8163 /// NonRecursive: The type encoding is not recursive;
8164 /// Recursive: The type encoding is recursive;
8165 /// Incomplete: An incomplete TypeString;
8166 /// IncompleteUsed: An incomplete TypeString that has been used in a
8167 /// Recursive type encoding.
8169 /// A NonRecursive entry will have all of its sub-members expanded as fully
8170 /// as possible. Whilst it may contain types which are recursive, the type
8171 /// itself is not recursive and thus its encoding may be safely used whenever
8172 /// the type is encountered.
8174 /// A Recursive entry will have all of its sub-members expanded as fully as
8175 /// possible. The type itself is recursive and it may contain other types which
8176 /// are recursive. The Recursive encoding must not be used during the expansion
8177 /// of a recursive type's recursive branch. For simplicity the code uses
8178 /// IncompleteCount to reject all usage of Recursive encodings for member types.
8180 /// An Incomplete entry is always a RecordType and only encodes its
8181 /// identifier e.g. "s(S){}". Incomplete 'StubEnc' entries are ephemeral and
8182 /// are placed into the cache during type expansion as a means to identify and
8183 /// handle recursive inclusion of types as sub-members. If there is recursion
8184 /// the entry becomes IncompleteUsed.
8186 /// During the expansion of a RecordType's members:
8188 /// If the cache contains a NonRecursive encoding for the member type, the
8189 /// cached encoding is used;
8191 /// If the cache contains a Recursive encoding for the member type, the
8192 /// cached encoding is 'Swapped' out, as it may be incorrect, and...
8194 /// If the member is a RecordType, an Incomplete encoding is placed into the
8195 /// cache to break potential recursive inclusion of itself as a sub-member;
8197 /// Once a member RecordType has been expanded, its temporary incomplete
8198 /// entry is removed from the cache. If a Recursive encoding was swapped out
8199 /// it is swapped back in;
8201 /// If an incomplete entry is used to expand a sub-member, the incomplete
8202 /// entry is marked as IncompleteUsed. The cache keeps count of how many
8203 /// IncompleteUsed entries it currently contains in IncompleteUsedCount;
8205 /// If a member's encoding is found to be a NonRecursive or Recursive viz:
8206 /// IncompleteUsedCount==0, the member's encoding is added to the cache.
8207 /// Else the member is part of a recursive type and thus the recursion has
8208 /// been exited too soon for the encoding to be correct for the member.
8210 class TypeStringCache {
8211 enum Status {NonRecursive, Recursive, Incomplete, IncompleteUsed};
8213 std::string Str; // The encoded TypeString for the type.
8214 enum Status State; // Information about the encoding in 'Str'.
8215 std::string Swapped; // A temporary place holder for a Recursive encoding
8216 // during the expansion of RecordType's members.
8218 std::map<const IdentifierInfo *, struct Entry> Map;
8219 unsigned IncompleteCount; // Number of Incomplete entries in the Map.
8220 unsigned IncompleteUsedCount; // Number of IncompleteUsed entries in the Map.
8222 TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {}
8223 void addIncomplete(const IdentifierInfo *ID, std::string StubEnc);
8224 bool removeIncomplete(const IdentifierInfo *ID);
8225 void addIfComplete(const IdentifierInfo *ID, StringRef Str,
8227 StringRef lookupStr(const IdentifierInfo *ID);
/// TypeString encodings for enum & union fields must be ordered.
8231 /// FieldEncoding is a helper for this ordering process.
8232 class FieldEncoding {
8236 FieldEncoding(bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {}
8237 StringRef str() { return Enc; }
8238 bool operator<(const FieldEncoding &rhs) const {
8239 if (HasName != rhs.HasName) return HasName;
8240 return Enc < rhs.Enc;
8244 class XCoreABIInfo : public DefaultABIInfo {
8246 XCoreABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
8247 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
8248 QualType Ty) const override;
8251 class XCoreTargetCodeGenInfo : public TargetCodeGenInfo {
8252 mutable TypeStringCache TSC;
8254 XCoreTargetCodeGenInfo(CodeGenTypes &CGT)
8255 :TargetCodeGenInfo(new XCoreABIInfo(CGT)) {}
8256 void emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
8257 CodeGen::CodeGenModule &M) const override;
8260 } // End anonymous namespace.
// TODO: this implementation is likely now redundant with the default
// EmitVAArg.
8264 Address XCoreABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
8265 QualType Ty) const {
8266 CGBuilderTy &Builder = CGF.Builder;
8269 CharUnits SlotSize = CharUnits::fromQuantity(4);
8270 Address AP(Builder.CreateLoad(VAListAddr), SlotSize);
8272 // Handle the argument.
8273 ABIArgInfo AI = classifyArgumentType(Ty);
8274 CharUnits TypeAlign = getContext().getTypeAlignInChars(Ty);
8275 llvm::Type *ArgTy = CGT.ConvertType(Ty);
8276 if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
8277 AI.setCoerceToType(ArgTy);
8278 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
8280 Address Val = Address::invalid();
8281 CharUnits ArgSize = CharUnits::Zero();
  switch (AI.getKind()) {
  case ABIArgInfo::Expand:
  case ABIArgInfo::CoerceAndExpand:
  case ABIArgInfo::InAlloca:
    llvm_unreachable("Unsupported ABI kind for va_arg");
  case ABIArgInfo::Ignore:
    Val = Address(llvm::UndefValue::get(ArgPtrTy), TypeAlign);
    ArgSize = CharUnits::Zero();
    break;
  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    Val = Builder.CreateBitCast(AP, ArgPtrTy);
    ArgSize = CharUnits::fromQuantity(
                  getDataLayout().getTypeAllocSize(AI.getCoerceToType()));
    ArgSize = ArgSize.alignTo(SlotSize);
    break;
  case ABIArgInfo::Indirect:
    Val = Builder.CreateElementBitCast(AP, ArgPtrTy);
    Val = Address(Builder.CreateLoad(Val), TypeAlign);
    ArgSize = SlotSize;
    break;
  }
  // Increment the VAList.
  if (!ArgSize.isZero()) {
    llvm::Value *APN =
        Builder.CreateConstInBoundsByteGEP(AP.getPointer(), ArgSize);
    Builder.CreateStore(APN, VAListAddr);
  }

  return Val;
}
8315 /// During the expansion of a RecordType, an incomplete TypeString is placed
8316 /// into the cache as a means to identify and break recursion.
8317 /// If there is a Recursive encoding in the cache, it is swapped out and will
8318 /// be reinserted by removeIncomplete().
8319 /// All other types of encoding should have been used rather than arriving here.
8320 void TypeStringCache::addIncomplete(const IdentifierInfo *ID,
                                    std::string StubEnc) {
  if (!ID)
    return;

  Entry &E = Map[ID];
  assert( (E.Str.empty() || E.State == Recursive) &&
         "Incorrect use of addIncomplete");
  assert(!StubEnc.empty() && "Passing an empty string to addIncomplete()");
  E.Swapped.swap(E.Str); // swap out the Recursive
  E.Str.swap(StubEnc);
  E.State = Incomplete;
  ++IncompleteCount;
}
8334 /// Once the RecordType has been expanded, the temporary incomplete TypeString
8335 /// must be removed from the cache.
8336 /// If a Recursive was swapped out by addIncomplete(), it will be replaced.
8337 /// Returns true if the RecordType was defined recursively.
bool TypeStringCache::removeIncomplete(const IdentifierInfo *ID) {
  if (!ID)
    return false;

  auto I = Map.find(ID);
8342 assert(I != Map.end() && "Entry not present");
8343 Entry &E = I->second;
8344 assert( (E.State == Incomplete ||
8345 E.State == IncompleteUsed) &&
8346 "Entry must be an incomplete type");
  bool IsRecursive = false;
  if (E.State == IncompleteUsed) {
    // We made use of our Incomplete encoding, thus we are recursive.
    IsRecursive = true;
    --IncompleteUsedCount;
  }
  if (E.Swapped.empty())
    Map.erase(I);
  else {
    // Swap the Recursive back.
    E.Swapped.swap(E.Str);
    E.Swapped.clear();
    E.State = Recursive;
  }
  --IncompleteCount;
  return IsRecursive;
}
8365 /// Add the encoded TypeString to the cache only if it is NonRecursive or
8366 /// Recursive (viz: all sub-members were expanded as fully as possible).
8367 void TypeStringCache::addIfComplete(const IdentifierInfo *ID, StringRef Str,
  if (!ID || IncompleteUsedCount)
    return; // No key, or it is an incomplete sub-type, so don't add.
  Entry &E = Map[ID];
  if (IsRecursive && !E.Str.empty()) {
    assert(E.State==Recursive && E.Str.size() == Str.size() &&
           "This is not the same Recursive entry");
    // The parent container was not recursive after all, so we could have used
    // this Recursive sub-member entry after all, but we assumed the worst when
    // we started viz: IncompleteCount!=0.
    return;
  }
  assert(E.Str.empty() && "Entry already present");
  E.Str = Str.str();
  E.State = IsRecursive? Recursive : NonRecursive;
}
8385 /// Return a cached TypeString encoding for the ID. If there isn't one, or we
8386 /// are recursively expanding a type (IncompleteCount != 0) and the cached
8387 /// encoding is Recursive, return an empty StringRef.
StringRef TypeStringCache::lookupStr(const IdentifierInfo *ID) {
  if (!ID)
    return StringRef();   // We have no key.
  auto I = Map.find(ID);
  if (I == Map.end())
    return StringRef();   // We have no encoding.
  Entry &E = I->second;
  if (E.State == Recursive && IncompleteCount)
    return StringRef();   // We don't use Recursive encodings for member types.

  if (E.State == Incomplete) {
    // The incomplete type is being used to break out of recursion.
    E.State = IncompleteUsed;
    ++IncompleteUsedCount;
  }
  return E.Str;
}
8406 /// The XCore ABI includes a type information section that communicates symbol
8407 /// type information to the linker. The linker uses this information to verify
/// safety/correctness of things such as array bounds and pointers et al.
8409 /// The ABI only requires C (and XC) language modules to emit TypeStrings.
8410 /// This type information (TypeString) is emitted into meta data for all global
8411 /// symbols: definitions, declarations, functions & variables.
8413 /// The TypeString carries type, qualifier, name, size & value details.
8414 /// Please see 'Tools Development Guide' section 2.16.2 for format details:
8415 /// https://www.xmos.com/download/public/Tools-Development-Guide%28X9114A%29.pdf
8416 /// The output is tested by test/CodeGen/xcore-stringtype.c.
8418 static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
8419 CodeGen::CodeGenModule &CGM, TypeStringCache &TSC);
8421 /// XCore uses emitTargetMD to emit TypeString metadata for global symbols.
8422 void XCoreTargetCodeGenInfo::emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
                                          CodeGen::CodeGenModule &CGM) const {
  SmallStringEnc Enc;
  if (getTypeString(Enc, D, CGM, TSC)) {
8426 llvm::LLVMContext &Ctx = CGM.getModule().getContext();
8427 llvm::Metadata *MDVals[] = {llvm::ConstantAsMetadata::get(GV),
8428 llvm::MDString::get(Ctx, Enc.str())};
8429 llvm::NamedMDNode *MD =
8430 CGM.getModule().getOrInsertNamedMetadata("xcore.typestrings");
8431 MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
8435 //===----------------------------------------------------------------------===//
8436 // SPIR ABI Implementation
8437 //===----------------------------------------------------------------------===//
8440 class SPIRTargetCodeGenInfo : public TargetCodeGenInfo {
8442 SPIRTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
8443 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
8444 unsigned getOpenCLKernelCallingConv() const override;
8447 } // End anonymous namespace.
8451 void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI) {
8452 DefaultABIInfo SPIRABI(CGM.getTypes());
8453 SPIRABI.computeInfo(FI);
8458 unsigned SPIRTargetCodeGenInfo::getOpenCLKernelCallingConv() const {
8459 return llvm::CallingConv::SPIR_KERNEL;
8462 static bool appendType(SmallStringEnc &Enc, QualType QType,
8463 const CodeGen::CodeGenModule &CGM,
8464 TypeStringCache &TSC);
8466 /// Helper function for appendRecordType().
8467 /// Builds a SmallVector containing the encoded field types in declaration
8469 static bool extractFieldType(SmallVectorImpl<FieldEncoding> &FE,
8470 const RecordDecl *RD,
8471 const CodeGen::CodeGenModule &CGM,
8472 TypeStringCache &TSC) {
8473 for (const auto *Field : RD->fields()) {
8476 Enc += Field->getName();
8478 if (Field->isBitField()) {
8480 llvm::raw_svector_ostream OS(Enc);
8481 OS << Field->getBitWidthValue(CGM.getContext());
8484 if (!appendType(Enc, Field->getType(), CGM, TSC))
8486 if (Field->isBitField())
8489 FE.emplace_back(!Field->getName().empty(), Enc);
/// Appends structure and union types to Enc and adds the encoding to the
/// cache. Recursively calls appendType (via extractFieldType) for each field.
/// Union types have their fields ordered according to the ABI.
static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT,
                             const CodeGen::CodeGenModule &CGM,
                             TypeStringCache &TSC, const IdentifierInfo *ID) {
  // Append the cached TypeString if we have one.
  StringRef TypeString = TSC.lookupStr(ID);
  if (!TypeString.empty()) {
    Enc += TypeString;
    return true;
  }

  // Start to emit an incomplete TypeString.
  size_t Start = Enc.size();
  Enc += (RT->isUnionType()? 'u' : 's');
  Enc += '(';
  if (ID)
    Enc += ID->getName();
  Enc += "){";

  // We collect all encoded fields and order as necessary.
  bool IsRecursive = false;
  const RecordDecl *RD = RT->getDecl()->getDefinition();
  if (RD && !RD->field_empty()) {
    // An incomplete TypeString stub is placed in the cache for this RecordType
    // so that recursive calls to this RecordType will use it whilst building a
    // complete TypeString for this RecordType.
    SmallVector<FieldEncoding, 16> FE;
    std::string StubEnc(Enc.substr(Start).str());
    StubEnc += '}'; // StubEnc now holds a valid incomplete TypeString.
    TSC.addIncomplete(ID, std::move(StubEnc));
    if (!extractFieldType(FE, RD, CGM, TSC)) {
      (void) TSC.removeIncomplete(ID);
      return false;
    }
    IsRecursive = TSC.removeIncomplete(ID);
    // The ABI requires unions to be sorted but not structures.
    // See FieldEncoding::operator< for sort algorithm.
    if (RT->isUnionType())
      std::sort(FE.begin(), FE.end());
    // We can now complete the TypeString.
    unsigned E = FE.size();
    for (unsigned I = 0; I != E; ++I) {
      if (I)
        Enc += ',';
      Enc += FE[I].str();
    }
  }
  Enc += '}';
  TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive);
  return true;
}
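
// As a rough illustration, a definition such as
// "struct S { int A; unsigned B; };" would be encoded along the lines of
// "s(S){m(A){si},m(B){ui}}", each field wrapped in an m(name){type} member
// encoding by extractFieldType above.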
/// Appends enum types to Enc and adds the encoding to the cache.
static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET,
                           TypeStringCache &TSC,
                           const IdentifierInfo *ID) {
  // Append the cached TypeString if we have one.
  StringRef TypeString = TSC.lookupStr(ID);
  if (!TypeString.empty()) {
    Enc += TypeString;
    return true;
  }

  size_t Start = Enc.size();
  Enc += "e(";
  if (ID)
    Enc += ID->getName();
  Enc += "){";

  // We collect all encoded enumerations and order them alphanumerically.
  if (const EnumDecl *ED = ET->getDecl()->getDefinition()) {
    SmallVector<FieldEncoding, 16> FE;
    for (auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E;
         ++I) {
      SmallStringEnc EnumEnc;
      EnumEnc += "m(";
      EnumEnc += I->getName();
      EnumEnc += "){";
      I->getInitVal().toString(EnumEnc);
      EnumEnc += '}';
      FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc));
    }
    std::sort(FE.begin(), FE.end());
    unsigned E = FE.size();
    for (unsigned I = 0; I != E; ++I) {
      if (I)
        Enc += ',';
      Enc += FE[I].str();
    }
  }
  Enc += '}';
  TSC.addIfComplete(ID, Enc.substr(Start), false);
  return true;
}
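
// As a rough illustration, "enum E { A = 1, B = 2 };" would be encoded along
// the lines of "e(E){m(A){1},m(B){2}}", with enumerators sorted
// alphanumerically by FieldEncoding::operator<.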
/// Appends the type's qualifier to Enc.
/// This is done prior to appending the type's encoding.
static void appendQualifier(SmallStringEnc &Enc, QualType QT) {
  // Qualifiers are emitted in alphabetical order.
  static const char *const Table[]={"","c:","r:","cr:","v:","cv:","rv:","crv:"};
  int Lookup = 0;
  if (QT.isConstQualified())
    Lookup += 1<<0;
  if (QT.isRestrictQualified())
    Lookup += 1<<1;
  if (QT.isVolatileQualified())
    Lookup += 1<<2;
  Enc += Table[Lookup];
}
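
// For example, "const volatile int" sets Lookup to 1 + 4 == 5, so "cv:" is
// appended before the builtin encoding, giving "cv:si" for the full type.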
/// Appends built-in types to Enc.
static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT) {
  const char *EncType;
  switch (BT->getKind()) {
    case BuiltinType::Void:       EncType = "0";   break;
    case BuiltinType::Bool:       EncType = "b";   break;
    case BuiltinType::Char_U:     EncType = "uc";  break;
    case BuiltinType::UChar:      EncType = "uc";  break;
    case BuiltinType::SChar:      EncType = "sc";  break;
    case BuiltinType::UShort:     EncType = "us";  break;
    case BuiltinType::Short:      EncType = "ss";  break;
    case BuiltinType::UInt:       EncType = "ui";  break;
    case BuiltinType::Int:        EncType = "si";  break;
    case BuiltinType::ULong:      EncType = "ul";  break;
    case BuiltinType::Long:       EncType = "sl";  break;
    case BuiltinType::ULongLong:  EncType = "ull"; break;
    case BuiltinType::LongLong:   EncType = "sll"; break;
    case BuiltinType::Float:      EncType = "ft";  break;
    case BuiltinType::Double:     EncType = "d";   break;
    case BuiltinType::LongDouble: EncType = "ld";  break;
    default:
      return false;
  }
  Enc += EncType;
  return true;
}
/// Appends a pointer encoding to Enc before calling appendType for the
/// pointee.
static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT,
                              const CodeGen::CodeGenModule &CGM,
                              TypeStringCache &TSC) {
  Enc += "p(";
  if (!appendType(Enc, PT->getPointeeType(), CGM, TSC))
    return false;
  Enc += ')';
  return true;
}
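
// For example, "int *" would be encoded as "p(si)": the pointee encoding
// wrapped in "p(...)".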
/// Appends an array encoding to Enc before calling appendType for the element.
static bool appendArrayType(SmallStringEnc &Enc, QualType QT,
                            const ArrayType *AT,
                            const CodeGen::CodeGenModule &CGM,
                            TypeStringCache &TSC, StringRef NoSizeEnc) {
  if (AT->getSizeModifier() != ArrayType::Normal)
    return false;
  Enc += "a(";
  if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT))
    CAT->getSize().toStringUnsigned(Enc);
  else
    Enc += NoSizeEnc; // Global arrays use "*", otherwise it is "".
  Enc += ':';
  // The Qualifiers should be attached to the type rather than the array.
  appendQualifier(Enc, QT);
  if (!appendType(Enc, AT->getElementType(), CGM, TSC))
    return false;
  Enc += ')';
  return true;
}
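
// For example, "int A[10]" would be encoded as "a(10:si)", while a global
// array of unknown size falls back to NoSizeEnc and would appear as
// "a(*:si)".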
/// Appends a function encoding to Enc, calling appendType for the return type
/// and the arguments.
static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT,
                               const CodeGen::CodeGenModule &CGM,
                               TypeStringCache &TSC) {
  Enc += "f{";
  if (!appendType(Enc, FT->getReturnType(), CGM, TSC))
    return false;
  Enc += "}(";
  if (const FunctionProtoType *FPT = FT->getAs<FunctionProtoType>()) {
    // N.B. we are only interested in the adjusted param types.
    auto I = FPT->param_type_begin();
    auto E = FPT->param_type_end();
    if (I != E) {
      do {
        if (!appendType(Enc, *I, CGM, TSC))
          return false;
        ++I;
        if (I != E)
          Enc += ',';
      } while (I != E);
      if (FPT->isVariadic())
        Enc += ",va";
    } else {
      if (FPT->isVariadic())
        Enc += "va";
      else
        Enc += "0";
    }
  }
  Enc += ')';
  return true;
}
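
// For example, "int f(int, ...)" would be encoded as "f{si}(si,va)", while a
// prototype taking no parameters encodes its parameter list as "0", giving
// "f{si}(0)" for "int f(void)".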
/// Handles the type's qualifier before dispatching a call to handle specific
/// type encodings.
static bool appendType(SmallStringEnc &Enc, QualType QType,
                       const CodeGen::CodeGenModule &CGM,
                       TypeStringCache &TSC) {

  QualType QT = QType.getCanonicalType();

  if (const ArrayType *AT = QT->getAsArrayTypeUnsafe())
    // The Qualifiers should be attached to the type rather than the array.
    // Thus we don't call appendQualifier() here.
    return appendArrayType(Enc, QT, AT, CGM, TSC, "");

  appendQualifier(Enc, QT);

  if (const BuiltinType *BT = QT->getAs<BuiltinType>())
    return appendBuiltinType(Enc, BT);

  if (const PointerType *PT = QT->getAs<PointerType>())
    return appendPointerType(Enc, PT, CGM, TSC);

  if (const EnumType *ET = QT->getAs<EnumType>())
    return appendEnumType(Enc, ET, TSC, QT.getBaseTypeIdentifier());

  if (const RecordType *RT = QT->getAsStructureType())
    return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());

  if (const RecordType *RT = QT->getAsUnionType())
    return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());

  if (const FunctionType *FT = QT->getAs<FunctionType>())
    return appendFunctionType(Enc, FT, CGM, TSC);

  return false;
}
static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
                          CodeGen::CodeGenModule &CGM, TypeStringCache &TSC) {
  if (!D)
    return false;

  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->getLanguageLinkage() != CLanguageLinkage)
      return false;
    return appendType(Enc, FD->getType(), CGM, TSC);
  }

  if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
    if (VD->getLanguageLinkage() != CLanguageLinkage)
      return false;
    QualType QT = VD->getType().getCanonicalType();
    if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) {
      // Global ArrayTypes are given a size of '*' if the size is unknown.
      // The Qualifiers should be attached to the type rather than the array.
      // Thus we don't call appendQualifier() here.
      return appendArrayType(Enc, QT, AT, CGM, TSC, "*");
    }
    return appendType(Enc, QT, CGM, TSC);
  }
  return false;
}
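
// For example, a C global declared as "extern int gArr[];" takes the VarDecl
// path above and, having no known size, would be encoded as "a(*:si)" via
// the "*" NoSizeEnc argument.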
//===----------------------------------------------------------------------===//
// Driver code
//===----------------------------------------------------------------------===//

bool CodeGenModule::supportsCOMDAT() const {
  return getTriple().supportsCOMDAT();
}
const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
  if (TheTargetCodeGenInfo)
    return *TheTargetCodeGenInfo;

  // Helper to set the unique_ptr while still keeping the return value.
  auto SetCGInfo = [&](TargetCodeGenInfo *P) -> const TargetCodeGenInfo & {
    this->TheTargetCodeGenInfo.reset(P);
    return *P;
  };

  const llvm::Triple &Triple = getTarget().getTriple();
  switch (Triple.getArch()) {
  default:
    return SetCGInfo(new DefaultTargetCodeGenInfo(Types));
  case llvm::Triple::le32:
    return SetCGInfo(new PNaClTargetCodeGenInfo(Types));
  case llvm::Triple::mips:
  case llvm::Triple::mipsel:
    if (Triple.getOS() == llvm::Triple::NaCl)
      return SetCGInfo(new PNaClTargetCodeGenInfo(Types));
    return SetCGInfo(new MIPSTargetCodeGenInfo(Types, true));

  case llvm::Triple::mips64:
  case llvm::Triple::mips64el:
    return SetCGInfo(new MIPSTargetCodeGenInfo(Types, false));

  case llvm::Triple::avr:
    return SetCGInfo(new AVRTargetCodeGenInfo(Types));
  case llvm::Triple::aarch64:
  case llvm::Triple::aarch64_be: {
    AArch64ABIInfo::ABIKind Kind = AArch64ABIInfo::AAPCS;
    if (getTarget().getABI() == "darwinpcs")
      Kind = AArch64ABIInfo::DarwinPCS;
    else if (Triple.isOSWindows())
      return SetCGInfo(
          new WindowsAArch64TargetCodeGenInfo(Types, AArch64ABIInfo::Win64));

    return SetCGInfo(new AArch64TargetCodeGenInfo(Types, Kind));
  }
  case llvm::Triple::wasm32:
  case llvm::Triple::wasm64:
    return SetCGInfo(new WebAssemblyTargetCodeGenInfo(Types));
  case llvm::Triple::arm:
  case llvm::Triple::armeb:
  case llvm::Triple::thumb:
  case llvm::Triple::thumbeb: {
    if (Triple.getOS() == llvm::Triple::Win32) {
      return SetCGInfo(
          new WindowsARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS_VFP));
    }

    ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
    StringRef ABIStr = getTarget().getABI();
    if (ABIStr == "apcs-gnu")
      Kind = ARMABIInfo::APCS;
    else if (ABIStr == "aapcs16")
      Kind = ARMABIInfo::AAPCS16_VFP;
    else if (CodeGenOpts.FloatABI == "hard" ||
             (CodeGenOpts.FloatABI != "soft" &&
              (Triple.getEnvironment() == llvm::Triple::GNUEABIHF ||
               Triple.getEnvironment() == llvm::Triple::MuslEABIHF ||
               Triple.getEnvironment() == llvm::Triple::EABIHF)))
      Kind = ARMABIInfo::AAPCS_VFP;

    return SetCGInfo(new ARMTargetCodeGenInfo(Types, Kind));
  }
  case llvm::Triple::ppc:
    return SetCGInfo(
        new PPC32TargetCodeGenInfo(Types, CodeGenOpts.FloatABI == "soft"));
  case llvm::Triple::ppc64:
    if (Triple.isOSBinFormatELF()) {
      PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv1;
      if (getTarget().getABI() == "elfv2")
        Kind = PPC64_SVR4_ABIInfo::ELFv2;
      bool HasQPX = getTarget().getABI() == "elfv1-qpx";
      bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";

      return SetCGInfo(new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX,
                                                        IsSoftFloat));
    }
    return SetCGInfo(new PPC64TargetCodeGenInfo(Types));
  case llvm::Triple::ppc64le: {
    assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!");
    PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv2;
    if (getTarget().getABI() == "elfv1" || getTarget().getABI() == "elfv1-qpx")
      Kind = PPC64_SVR4_ABIInfo::ELFv1;
    bool HasQPX = getTarget().getABI() == "elfv1-qpx";
    bool IsSoftFloat = CodeGenOpts.FloatABI == "soft";

    return SetCGInfo(new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX,
                                                      IsSoftFloat));
  }
  case llvm::Triple::nvptx:
  case llvm::Triple::nvptx64:
    return SetCGInfo(new NVPTXTargetCodeGenInfo(Types));

  case llvm::Triple::msp430:
    return SetCGInfo(new MSP430TargetCodeGenInfo(Types));
  case llvm::Triple::systemz: {
    bool HasVector = getTarget().getABI() == "vector";
    return SetCGInfo(new SystemZTargetCodeGenInfo(Types, HasVector));
  }
  case llvm::Triple::tce:
  case llvm::Triple::tcele:
    return SetCGInfo(new TCETargetCodeGenInfo(Types));
  case llvm::Triple::x86: {
    bool IsDarwinVectorABI = Triple.isOSDarwin();
    bool RetSmallStructInRegABI =
        X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
    bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing();

    if (Triple.getOS() == llvm::Triple::Win32) {
      return SetCGInfo(new WinX86_32TargetCodeGenInfo(
          Types, IsDarwinVectorABI, RetSmallStructInRegABI,
          IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters));
    } else {
      return SetCGInfo(new X86_32TargetCodeGenInfo(
          Types, IsDarwinVectorABI, RetSmallStructInRegABI,
          IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters,
          CodeGenOpts.FloatABI == "soft"));
    }
  }
  case llvm::Triple::x86_64: {
    StringRef ABI = getTarget().getABI();
    X86AVXABILevel AVXLevel =
        (ABI == "avx512"
             ? X86AVXABILevel::AVX512
             : ABI == "avx" ? X86AVXABILevel::AVX : X86AVXABILevel::None);

    switch (Triple.getOS()) {
    case llvm::Triple::Win32:
      return SetCGInfo(new WinX86_64TargetCodeGenInfo(Types, AVXLevel));
    case llvm::Triple::PS4:
      return SetCGInfo(new PS4TargetCodeGenInfo(Types, AVXLevel));
    default:
      return SetCGInfo(new X86_64TargetCodeGenInfo(Types, AVXLevel));
    }
  }
  case llvm::Triple::hexagon:
    return SetCGInfo(new HexagonTargetCodeGenInfo(Types));
  case llvm::Triple::lanai:
    return SetCGInfo(new LanaiTargetCodeGenInfo(Types));
  case llvm::Triple::r600:
    return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types));
  case llvm::Triple::amdgcn:
    return SetCGInfo(new AMDGPUTargetCodeGenInfo(Types));
  case llvm::Triple::sparc:
    return SetCGInfo(new SparcV8TargetCodeGenInfo(Types));
  case llvm::Triple::sparcv9:
    return SetCGInfo(new SparcV9TargetCodeGenInfo(Types));
  case llvm::Triple::xcore:
    return SetCGInfo(new XCoreTargetCodeGenInfo(Types));
  case llvm::Triple::spir:
  case llvm::Triple::spir64:
    return SetCGInfo(new SPIRTargetCodeGenInfo(Types));
  }
}
/// Create an OpenCL kernel for an enqueued block.
///
/// The kernel has the same function type as the block invoke function. Its
/// name is the name of the block invoke function postfixed with "_kernel".
/// It simply calls the block invoke function then returns.
llvm::Function *
TargetCodeGenInfo::createEnqueuedBlockKernel(CodeGenFunction &CGF,
                                             llvm::Function *Invoke,
                                             llvm::Value *BlockLiteral) const {
  auto *InvokeFT = Invoke->getFunctionType();
  llvm::SmallVector<llvm::Type *, 2> ArgTys;
  for (auto &P : InvokeFT->params())
    ArgTys.push_back(P);
  auto &C = CGF.getLLVMContext();
  std::string Name = Invoke->getName().str() + "_kernel";
  auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys, false);
  auto *F = llvm::Function::Create(FT, llvm::GlobalValue::InternalLinkage, Name,
                                   &CGF.CGM.getModule());
  auto IP = CGF.Builder.saveIP();
  auto *BB = llvm::BasicBlock::Create(C, "entry", F);
  auto &Builder = CGF.Builder;
  Builder.SetInsertPoint(BB);
  llvm::SmallVector<llvm::Value *, 2> Args;
  for (auto &A : F->args())
    Args.push_back(&A);
  Builder.CreateCall(Invoke, Args);
  Builder.CreateRetVoid();
  Builder.restoreIP(IP);
  return F;
}
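
// For an invoke function named, say, "__foo_block_invoke", the wrapper built
// above would look roughly like the following IR (names and parameter types
// are illustrative placeholders):
//
//   define internal void @__foo_block_invoke_kernel(i8 addrspace(1)* %arg) {
//   entry:
//     call void @__foo_block_invoke(i8 addrspace(1)* %arg)
//     ret void
//   }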
/// Create an OpenCL kernel for an enqueued block.
///
/// The type of the first argument (the block literal) is the struct type
/// of the block literal instead of a pointer type. The first argument
/// (block literal) is passed directly by value to the kernel. The kernel
/// allocates the same type of struct on the stack, stores the block literal
/// into it, and passes its pointer to the block invoke function. The kernel
/// has the "enqueued-block" function attribute and kernel argument metadata.
llvm::Function *AMDGPUTargetCodeGenInfo::createEnqueuedBlockKernel(
    CodeGenFunction &CGF, llvm::Function *Invoke,
    llvm::Value *BlockLiteral) const {
  auto &Builder = CGF.Builder;
  auto &C = CGF.getLLVMContext();

  auto *BlockTy = BlockLiteral->getType()->getPointerElementType();
  auto *InvokeFT = Invoke->getFunctionType();
  llvm::SmallVector<llvm::Type *, 2> ArgTys;
  llvm::SmallVector<llvm::Metadata *, 8> AddressQuals;
  llvm::SmallVector<llvm::Metadata *, 8> AccessQuals;
  llvm::SmallVector<llvm::Metadata *, 8> ArgTypeNames;
  llvm::SmallVector<llvm::Metadata *, 8> ArgBaseTypeNames;
  llvm::SmallVector<llvm::Metadata *, 8> ArgTypeQuals;
  llvm::SmallVector<llvm::Metadata *, 8> ArgNames;

  ArgTys.push_back(BlockTy);
  ArgTypeNames.push_back(llvm::MDString::get(C, "__block_literal"));
  AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(0)));
  ArgBaseTypeNames.push_back(llvm::MDString::get(C, "__block_literal"));
  ArgTypeQuals.push_back(llvm::MDString::get(C, ""));
  AccessQuals.push_back(llvm::MDString::get(C, "none"));
  ArgNames.push_back(llvm::MDString::get(C, "block_literal"));
  for (unsigned I = 1, E = InvokeFT->getNumParams(); I < E; ++I) {
    ArgTys.push_back(InvokeFT->getParamType(I));
    ArgTypeNames.push_back(llvm::MDString::get(C, "void*"));
    AddressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(3)));
    AccessQuals.push_back(llvm::MDString::get(C, "none"));
    ArgBaseTypeNames.push_back(llvm::MDString::get(C, "void*"));
    ArgTypeQuals.push_back(llvm::MDString::get(C, ""));
    ArgNames.push_back(
        llvm::MDString::get(C, (Twine("local_arg") + Twine(I)).str()));
  }
  std::string Name = Invoke->getName().str() + "_kernel";
  auto *FT = llvm::FunctionType::get(llvm::Type::getVoidTy(C), ArgTys, false);
  auto *F = llvm::Function::Create(FT, llvm::GlobalValue::InternalLinkage, Name,
                                   &CGF.CGM.getModule());
  F->addFnAttr("enqueued-block");
  auto IP = CGF.Builder.saveIP();
  auto *BB = llvm::BasicBlock::Create(C, "entry", F);
  Builder.SetInsertPoint(BB);
  unsigned BlockAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(BlockTy);
  auto *BlockPtr = Builder.CreateAlloca(BlockTy, nullptr);
  BlockPtr->setAlignment(BlockAlign);
  Builder.CreateAlignedStore(F->arg_begin(), BlockPtr, BlockAlign);
  auto *Cast = Builder.CreatePointerCast(BlockPtr, InvokeFT->getParamType(0));
  llvm::SmallVector<llvm::Value *, 2> Args;
  Args.push_back(Cast);
  for (auto I = F->arg_begin() + 1, E = F->arg_end(); I != E; ++I)
    Args.push_back(I);
  Builder.CreateCall(Invoke, Args);
  Builder.CreateRetVoid();
  Builder.restoreIP(IP);
  F->setMetadata("kernel_arg_addr_space", llvm::MDNode::get(C, AddressQuals));
  F->setMetadata("kernel_arg_access_qual", llvm::MDNode::get(C, AccessQuals));
  F->setMetadata("kernel_arg_type", llvm::MDNode::get(C, ArgTypeNames));
  F->setMetadata("kernel_arg_base_type",
                 llvm::MDNode::get(C, ArgBaseTypeNames));
  F->setMetadata("kernel_arg_type_qual", llvm::MDNode::get(C, ArgTypeQuals));
  if (CGF.CGM.getCodeGenOpts().EmitOpenCLArgMetadata)
    F->setMetadata("kernel_arg_name", llvm::MDNode::get(C, ArgNames));

  return F;
}
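
// For an invoke function with the block-literal argument plus one local
// pointer argument, the metadata attached above would render roughly as
// (illustrative placeholder values):
//
//   !{i32 0, i32 3}                  ; kernel_arg_addr_space
//   !{!"none", !"none"}              ; kernel_arg_access_qual
//   !{!"__block_literal", !"void*"}  ; kernel_arg_type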