//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//
#include "TargetInfo.h"
#include "ABIInfo.h"
#include "CGCXXABI.h"
#include "CGValue.h"
#include "CodeGenFunction.h"
#include "clang/AST/RecordLayout.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>    // std::sort
using namespace clang;
using namespace CodeGen;

static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
                               llvm::Value *Array,
                               llvm::Value *Value,
                               unsigned FirstIndex,
                               unsigned LastIndex) {
  // Alternatively, we could emit this as a loop in the source.
  for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
    llvm::Value *Cell =
        Builder.CreateConstInBoundsGEP1_32(Builder.getInt8Ty(), Array, I);
    Builder.CreateAlignedStore(Value, Cell, CharUnits::One());
  }
}
static bool isAggregateTypeForABI(QualType T) {
  return !CodeGenFunction::hasScalarEvaluationKind(T) ||
         T->isMemberFunctionPointerType();
}
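
// For ABI purposes, "aggregate" is broader than the C/C++ notion: anything
// that is not evaluated as a scalar (structs, unions, complex types) counts,
// and so do member function pointers, which lower to a two-element
// {function pointer, adjustment} pair under e.g. the Itanium C++ ABI.
// A plain 'int' or 'T *' does not.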
ABIArgInfo
ABIInfo::getNaturalAlignIndirect(QualType Ty, bool ByRef, bool Realign,
                                 llvm::Type *Padding) const {
  return ABIArgInfo::getIndirect(getContext().getTypeAlignInChars(Ty),
                                 ByRef, Realign, Padding);
}

ABIArgInfo
ABIInfo::getNaturalAlignIndirectInReg(QualType Ty, bool Realign) const {
  return ABIArgInfo::getIndirectInReg(getContext().getTypeAlignInChars(Ty),
                                      /*ByRef*/ false, Realign);
}

Address ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
                             QualType Ty) const {
  return Address::invalid();
}

ABIInfo::~ABIInfo() {}
static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT,
                                              CGCXXABI &CXXABI) {
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!RD)
    return CGCXXABI::RAA_Default;
  return CXXABI.getRecordArgABI(RD);
}

static CGCXXABI::RecordArgABI getRecordArgABI(QualType T,
                                              CGCXXABI &CXXABI) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return CGCXXABI::RAA_Default;
  return getRecordArgABI(RT, CXXABI);
}
/// Pass transparent unions as if they were the type of the first element. Sema
/// should ensure that all elements of the union have the same "machine type".
static QualType useFirstFieldIfTransparentUnion(QualType Ty) {
  if (const RecordType *UT = Ty->getAsUnionType()) {
    const RecordDecl *UD = UT->getDecl();
    if (UD->hasAttr<TransparentUnionAttr>()) {
      assert(!UD->field_empty() && "sema created an empty transparent union");
      return UD->field_begin()->getType();
    }
  }
  return Ty;
}
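
// Illustrative example (hypothetical type, not from this file): given
//   typedef union {
//     int *ip;
//     float *fp;
//   } UPtr __attribute__((transparent_union));
// a parameter of type UPtr is lowered exactly as 'int *', the type of the
// union's first field, would be.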
CGCXXABI &ABIInfo::getCXXABI() const {
  return CGT.getCXXABI();
}

ASTContext &ABIInfo::getContext() const {
  return CGT.getContext();
}

llvm::LLVMContext &ABIInfo::getVMContext() const {
  return CGT.getLLVMContext();
}

const llvm::DataLayout &ABIInfo::getDataLayout() const {
  return CGT.getDataLayout();
}

const TargetInfo &ABIInfo::getTarget() const {
  return CGT.getTarget();
}

bool ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
  return false;
}

bool ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
                                                uint64_t Members) const {
  return false;
}

bool ABIInfo::shouldSignExtUnsignedType(QualType Ty) const {
  return false;
}
void ABIArgInfo::dump() const {
  raw_ostream &OS = llvm::errs();
  OS << "(ABIArgInfo Kind=";
  switch (TheKind) {
  case Direct:
    OS << "Direct Type=";
    if (llvm::Type *Ty = getCoerceToType())
      Ty->print(OS);
    else
      OS << "null";
    break;
  case Extend:
    OS << "Extend";
    break;
  case Ignore:
    OS << "Ignore";
    break;
  case InAlloca:
    OS << "InAlloca Offset=" << getInAllocaFieldIndex();
    break;
  case Indirect:
    OS << "Indirect Align=" << getIndirectAlign().getQuantity()
       << " ByVal=" << getIndirectByVal()
       << " Realign=" << getIndirectRealign();
    break;
  case Expand:
    OS << "Expand";
    break;
  }
  OS << ")\n";
}
// Dynamically round a pointer up to a multiple of the given alignment.
static llvm::Value *emitRoundPointerUpToAlignment(CodeGenFunction &CGF,
                                                  llvm::Value *Ptr,
                                                  CharUnits Align) {
  llvm::Value *PtrAsInt = Ptr;
  // OverflowArgArea = (OverflowArgArea + Align - 1) & -Align;
  PtrAsInt = CGF.Builder.CreatePtrToInt(PtrAsInt, CGF.IntPtrTy);
  PtrAsInt = CGF.Builder.CreateAdd(PtrAsInt,
        llvm::ConstantInt::get(CGF.IntPtrTy, Align.getQuantity() - 1));
  PtrAsInt = CGF.Builder.CreateAnd(PtrAsInt,
        llvm::ConstantInt::get(CGF.IntPtrTy, -Align.getQuantity()));
  PtrAsInt = CGF.Builder.CreateIntToPtr(PtrAsInt,
                                        Ptr->getType(),
                                        Ptr->getName() + ".aligned");
  return PtrAsInt;
}
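
// Worked example of the add-and-mask idiom above: with Align == 16 and a
// pointer value of 0x1004, the add produces 0x1013 and the mask with -16
// (i.e. ~0xf) yields 0x1010, the next 16-byte boundary. A pointer that is
// already aligned is unchanged: 0x1010 + 15 = 0x101f, masked back to 0x1010.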
/// Emit va_arg for a platform using the common void* representation,
/// where arguments are simply emitted in an array of slots on the stack.
///
/// This version implements the core direct-value passing rules.
///
/// \param SlotSize - The size and alignment of a stack slot.
/// Each argument will be allocated to a multiple of this number of
/// slots, and all the slots will be aligned to this value.
/// \param AllowHigherAlign - The slot alignment is not a cap;
/// an argument type with an alignment greater than the slot size
/// will be emitted on a higher-alignment address, potentially
/// leaving one or more empty slots behind as padding. If this
/// is false, the returned address might be less-aligned than
/// DirectAlign.
static Address emitVoidPtrDirectVAArg(CodeGenFunction &CGF,
                                      Address VAListAddr,
                                      llvm::Type *DirectTy,
                                      CharUnits DirectSize,
                                      CharUnits DirectAlign,
                                      CharUnits SlotSize,
                                      bool AllowHigherAlign) {
  // Cast the element type to i8* if necessary. Some platforms define
  // va_list as a struct containing an i8* instead of just an i8*.
  if (VAListAddr.getElementType() != CGF.Int8PtrTy)
    VAListAddr = CGF.Builder.CreateElementBitCast(VAListAddr, CGF.Int8PtrTy);

  llvm::Value *Ptr = CGF.Builder.CreateLoad(VAListAddr, "argp.cur");

  // If the CC aligns values higher than the slot size, do so if needed.
  Address Addr = Address::invalid();
  if (AllowHigherAlign && DirectAlign > SlotSize) {
    Addr = Address(emitRoundPointerUpToAlignment(CGF, Ptr, DirectAlign),
                   DirectAlign);
  } else {
    Addr = Address(Ptr, SlotSize);
  }

  // Advance the pointer past the argument, then store that back.
  CharUnits FullDirectSize = DirectSize.RoundUpToAlignment(SlotSize);
  llvm::Value *NextPtr =
    CGF.Builder.CreateConstInBoundsByteGEP(Addr.getPointer(), FullDirectSize,
                                           "argp.next");
  CGF.Builder.CreateStore(NextPtr, VAListAddr);

  // If the argument is smaller than a slot, and this is a big-endian
  // target, the argument will be right-adjusted in its slot.
  if (DirectSize < SlotSize && CGF.CGM.getDataLayout().isBigEndian()) {
    Addr = CGF.Builder.CreateConstInBoundsByteGEP(Addr, SlotSize - DirectSize);
  }

  Addr = CGF.Builder.CreateElementBitCast(Addr, DirectTy);
  return Addr;
}
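
// A short walk-through, assuming 4-byte slots: for an argument with
// DirectSize = DirectAlign = 8 and AllowHigherAlign set, the current pointer
// is first rounded up to an 8-byte boundary and argp then advances by 8
// (two slots). For a 2-byte argument on a big-endian target, the value sits
// in the high half of its slot, so the returned address is bumped by
// SlotSize - DirectSize = 2 bytes before use.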
/// Emit va_arg for a platform using the common void* representation,
/// where arguments are simply emitted in an array of slots on the stack.
///
/// \param IsIndirect - Values of this type are passed indirectly.
/// \param ValueInfo - The size and alignment of this type, generally
/// computed with getContext().getTypeInfoInChars(ValueTy).
/// \param SlotSizeAndAlign - The size and alignment of a stack slot.
/// Each argument will be allocated to a multiple of this number of
/// slots, and all the slots will be aligned to this value.
/// \param AllowHigherAlign - The slot alignment is not a cap;
/// an argument type with an alignment greater than the slot size
/// will be emitted on a higher-alignment address, potentially
/// leaving one or more empty slots behind as padding.
static Address emitVoidPtrVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType ValueTy, bool IsIndirect,
                                std::pair<CharUnits, CharUnits> ValueInfo,
                                CharUnits SlotSizeAndAlign,
                                bool AllowHigherAlign) {
  // The size and alignment of the value that was passed directly.
  CharUnits DirectSize, DirectAlign;
  if (IsIndirect) {
    DirectSize = CGF.getPointerSize();
    DirectAlign = CGF.getPointerAlign();
  } else {
    DirectSize = ValueInfo.first;
    DirectAlign = ValueInfo.second;
  }

  // Cast the address we've calculated to the right type.
  llvm::Type *DirectTy = CGF.ConvertTypeForMem(ValueTy);
  if (IsIndirect)
    DirectTy = DirectTy->getPointerTo(0);

  Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, DirectTy,
                                        DirectSize, DirectAlign,
                                        SlotSizeAndAlign,
                                        AllowHigherAlign);

  // If the value was passed indirectly, what sits in the slot is a pointer
  // to the value; load it to get at the value itself.
  if (IsIndirect)
    Addr = Address(CGF.Builder.CreateLoad(Addr), ValueInfo.second);

  return Addr;
}
static Address emitMergePHI(CodeGenFunction &CGF,
                            Address Addr1, llvm::BasicBlock *Block1,
                            Address Addr2, llvm::BasicBlock *Block2,
                            const llvm::Twine &Name = "") {
  assert(Addr1.getType() == Addr2.getType());
  llvm::PHINode *PHI = CGF.Builder.CreatePHI(Addr1.getType(), 2, Name);
  PHI->addIncoming(Addr1.getPointer(), Block1);
  PHI->addIncoming(Addr2.getPointer(), Block2);
  CharUnits Align = std::min(Addr1.getAlignment(), Addr2.getAlignment());
  return Address(PHI, Align);
}
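
// This helper exists for va_arg lowerings that branch (e.g. an in-register
// path and an in-memory path) and then need a single Address at the join
// point. Taking the minimum of the two alignments is the conservative
// choice: it is the strongest guarantee that holds on both incoming edges.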
TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }

// If someone can figure out a general rule for this, that would be great.
// It's probably just doomed to be platform-dependent, though.
unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
  // Verified for:
  //   x86-64     FreeBSD, Linux, Darwin
  //   x86-32     FreeBSD, Linux, Darwin
  //   PowerPC    Linux, Darwin
  //   ARM        Darwin (*not* EABI)
  return 32;
}

bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args,
                                     const FunctionNoProtoType *fnType) const {
  // The following conventions are known to require this to be false:
  //   x86_stdcall
  //   MIPS
  // For everything else, we just prefer false unless we opt out.
  return false;
}

void
TargetCodeGenInfo::getDependentLibraryOption(llvm::StringRef Lib,
                                             llvm::SmallString<24> &Opt) const {
  // This assumes the user is passing a library name like "rt" instead of a
  // filename like "librt.a/so", and that they don't care whether it's static or
  // dynamic.
  Opt = "-l";
  Opt += Lib;
}
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);

/// isEmptyField - Return true iff the field is "empty", that is, it
/// is an unnamed bit-field or an (array of) empty record(s).
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
                         bool AllowArrays) {
  if (FD->isUnnamedBitfield())
    return true;

  QualType FT = FD->getType();

  // Constant arrays of empty records count as empty, strip them off.
  // Constant arrays of zero length always count as empty.
  if (AllowArrays)
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize() == 0)
        return true;
      FT = AT->getElementType();
    }

  const RecordType *RT = FT->getAs<RecordType>();
  if (!RT)
    return false;

  // C++ record fields are never empty, at least in the Itanium ABI.
  //
  // FIXME: We should use a predicate for whether this behavior is true in the
  // current ABI.
  if (isa<CXXRecordDecl>(RT->getDecl()))
    return false;

  return isEmptyRecord(Context, FT, AllowArrays);
}
/// isEmptyRecord - Return true iff a structure contains only empty
/// fields. Note that a structure with a flexible array member is not
/// considered empty.
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())
      if (!isEmptyRecord(Context, I.getType(), true))
        return false;

  for (const auto *I : RD->fields())
    if (!isEmptyField(Context, I, AllowArrays))
      return false;
  return true;
}
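
// Illustration (hypothetical C type): with AllowArrays set, a struct such as
//   struct S { int : 0; int a[0]; };
// is an "empty record": the unnamed bit-field and the zero-length array both
// count as empty fields. Note the C++ caveat in isEmptyField above: fields of
// C++ record type are never considered empty under the Itanium ABI.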
/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The field declaration for the single non-empty field, if
/// it exists.
static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return nullptr;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return nullptr;

  const Type *Found = nullptr;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (const auto &I : CXXRD->bases()) {
      // Ignore empty records.
      if (isEmptyRecord(Context, I.getType(), true))
        continue;

      // If we already found an element then this isn't a single-element struct.
      if (Found)
        return nullptr;

      // If this is non-empty and not a single element struct, the composite
      // cannot be a single element struct.
      Found = isSingleElementStruct(I.getType(), Context);
      if (!Found)
        return nullptr;
    }
  }

  // Check for single element.
  for (const auto *FD : RD->fields()) {
    QualType FT = FD->getType();

    // Ignore empty fields.
    if (isEmptyField(Context, FD, true))
      continue;

    // If we already found an element then this isn't a single-element
    // struct.
    if (Found)
      return nullptr;

    // Treat single element arrays as the element.
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize().getZExtValue() != 1)
        break;
      FT = AT->getElementType();
    }

    if (!isAggregateTypeForABI(FT)) {
      Found = FT.getTypePtr();
    } else {
      Found = isSingleElementStruct(FT, Context);
      if (!Found)
        return nullptr;
    }
  }

  // We don't consider a struct a single-element struct if it has
  // padding beyond the element type.
  if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
    return nullptr;

  return Found;
}
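
// Examples (hypothetical types):
//   struct A { double d; };       // single-element struct: double
//   struct B { struct A a[1]; };  // still double, via the array/recursion
//   struct C { double d; int i; } // two non-empty fields: not single-element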
static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
  // Treat complex types as the element type.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
    Ty = CTy->getElementType();

  // Check for a type which we know has a simple scalar argument-passing
  // convention without any padding. (We're specifically looking for 32
  // and 64-bit integer and integer-equivalents, float, and double.)
  if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
      !Ty->isEnumeralType() && !Ty->isBlockPointerType())
    return false;

  uint64_t Size = Context.getTypeSize(Ty);
  return Size == 32 || Size == 64;
}
/// canExpandIndirectArgument - Test whether an argument type which is to be
/// passed indirectly (on the stack) would have the equivalent layout if it was
/// expanded into separate arguments. If so, we prefer to do the latter to
/// avoid inhibiting optimizations.
//
// FIXME: This predicate is missing many cases, currently it just follows
// llvm-gcc (checks that all fields are 32-bit or 64-bit primitive types). We
// should probably make this smarter, or better yet make the LLVM backend
// capable of handling it.
static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) {
  // We can only expand structure types.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;

  // We can only expand (C) structures.
  //
  // FIXME: This needs to be generalized to handle classes as well.
  const RecordDecl *RD = RT->getDecl();
  if (!RD->isStruct())
    return false;

  // We can also expand a C-like CXXRecordDecl.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    if (!CXXRD->isCLike())
      return false;
  }

  uint64_t Size = 0;

  for (const auto *FD : RD->fields()) {
    if (!is32Or64BitBasicType(FD->getType(), Context))
      return false;

    // FIXME: Reject bit-fields wholesale; there are two problems, we don't
    // know how to expand them yet, and the predicate for telling if a
    // bit-field still counts as "basic" is more complicated than what we were
    // doing previously.
    if (FD->isBitField())
      return false;

    Size += Context.getTypeSize(FD->getType());
  }

  // Make sure there are not any holes in the struct.
  if (Size != Context.getTypeSize(Ty))
    return false;

  return true;
}
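
// Examples (hypothetical types, assuming a target where double has 8-byte
// alignment):
//   struct P { int a; int b; };     // expandable: 32 + 32 == 64-bit size
//   struct Q { int a; double d; };  // rejected: 32 + 64 < 128-bit padded size
//   struct R { short s; int i; };   // rejected: short is not a 32/64-bit type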
/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
class DefaultABIInfo : public ABIInfo {
public:
  DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type);
  }

  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
};

class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
};

Address DefaultABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                  QualType Ty) const {
  return Address::invalid();
}
ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // passed by value.
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);

    return getNaturalAlignIndirect(Ty);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy))
    return getNaturalAlignIndirect(RetTy);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
//===----------------------------------------------------------------------===//
// WebAssembly ABI Implementation
//
// This is a very simple ABI that relies a lot on DefaultABIInfo.
//===----------------------------------------------------------------------===//

class WebAssemblyABIInfo final : public DefaultABIInfo {
public:
  explicit WebAssemblyABIInfo(CodeGen::CodeGenTypes &CGT)
      : DefaultABIInfo(CGT) {}

private:
  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  // DefaultABIInfo's classifyReturnType and classifyArgumentType are
  // non-virtual, but computeInfo is virtual, so we override that.
  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &Arg : FI.arguments())
      Arg.info = classifyArgumentType(Arg.type);
  }
};

class WebAssemblyTargetCodeGenInfo final : public TargetCodeGenInfo {
public:
  explicit WebAssemblyTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
      : TargetCodeGenInfo(new WebAssemblyABIInfo(CGT)) {}
};
/// \brief Classify argument of given type \p Ty.
ABIArgInfo WebAssemblyABIInfo::classifyArgumentType(QualType Ty) const {
  Ty = useFirstFieldIfTransparentUnion(Ty);

  if (isAggregateTypeForABI(Ty)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // passed by value.
    if (auto RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();
    // Lower single-element structs to just pass a regular value. TODO: We
    // could do reasonable-size multiple-element structs too, using getExpand(),
    // though watch out for things like bitfields.
    if (const Type *SeltTy = isSingleElementStruct(Ty, getContext()))
      return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
  }

  // Otherwise just do the default thing.
  return DefaultABIInfo::classifyArgumentType(Ty);
}
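
// For example (hypothetical types): 'struct { float x; }' is passed as a
// plain f32, an empty struct is dropped entirely, and a two-field struct
// falls through to DefaultABIInfo and is passed indirectly.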
ABIArgInfo WebAssemblyABIInfo::classifyReturnType(QualType RetTy) const {
  if (isAggregateTypeForABI(RetTy)) {
    // Records with non-trivial destructors/copy-constructors should not be
    // returned by value.
    if (!getRecordArgABI(RetTy, getCXXABI())) {
      // Ignore empty structs/unions.
      if (isEmptyRecord(getContext(), RetTy, true))
        return ABIArgInfo::getIgnore();
      // Lower single-element structs to just return a regular value. TODO: We
      // could do reasonable-size multiple-element structs too, using
      // ABIArgInfo::getDirect().
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));
    }
  }

  // Otherwise just do the default thing.
  return DefaultABIInfo::classifyReturnType(RetTy);
}
//===----------------------------------------------------------------------===//
// le32/PNaCl bitcode ABI Implementation
//
// This is a simplified version of the x86_32 ABI. Arguments and return values
// are always passed on the stack.
//===----------------------------------------------------------------------===//

class PNaClABIInfo : public ABIInfo {
public:
  PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  void computeInfo(CGFunctionInfo &FI) const override;
  Address EmitVAArg(CodeGenFunction &CGF,
                    Address VAListAddr, QualType Ty) const override;
};

class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new PNaClABIInfo(CGT)) {}
};

void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type);
}

Address PNaClABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty) const {
  return Address::invalid();
}
/// \brief Classify argument of given type \p Ty.
ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty)) {
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
    return getNaturalAlignIndirect(Ty);
  } else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
    // Treat an enum type as its underlying type.
    Ty = EnumTy->getDecl()->getIntegerType();
  } else if (Ty->isFloatingType()) {
    // Floating-point types don't go inreg.
    return ABIArgInfo::getDirect();
  }

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // In the PNaCl ABI we always return records/structures on the stack.
  if (isAggregateTypeForABI(RetTy))
    return getNaturalAlignIndirect(RetTy);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
/// IsX86_MMXType - Return true if this is an MMX type.
bool IsX86_MMXType(llvm::Type *IRType) {
  // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>.
  return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
    cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
    IRType->getScalarSizeInBits() != 64;
}
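
// Note how the three conditions interact: the vector must total 64 bits with
// integer elements, but the final check excludes <1 x i64>, whose single
// 64-bit scalar makes it a plain i64 candidate rather than an MMX value.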
static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                          StringRef Constraint,
                                          llvm::Type* Ty) {
  if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy()) {
    if (cast<llvm::VectorType>(Ty)->getBitWidth() != 64) {
      // Invalid MMX constraint
      return nullptr;
    }

    return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
  }

  // No operation needed
  return Ty;
}
/// Returns true if this type can be passed in SSE registers with the
/// X86_VectorCall calling convention. Shared between x86_32 and x86_64.
static bool isX86VectorTypeForVectorCall(ASTContext &Context, QualType Ty) {
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->isFloatingPoint() && BT->getKind() != BuiltinType::Half)
      return true;
  } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // vectorcall can pass XMM, YMM, and ZMM vectors. We don't pass SSE1 MMX
    // registers specially.
    unsigned VecSize = Context.getTypeSize(VT);
    if (VecSize == 128 || VecSize == 256 || VecSize == 512)
      return true;
  }
  return false;
}

/// Returns true if this aggregate is small enough to be passed in SSE
/// registers in the X86_VectorCall calling convention. Shared between x86_32
/// and x86_64.
static bool isX86VectorCallAggregateSmallEnough(uint64_t NumMembers) {
  return NumMembers <= 4;
}
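
// Taken together, these two predicates admit homogeneous aggregates such as
// (hypothetically) 'struct { __m128 a, b, c, d; }': every member is a legal
// vectorcall vector and there are at most four of them, so the whole struct
// can travel in SSE registers. A fifth member, or a 'half' element type,
// pushes the aggregate back onto the ordinary classification path.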
//===----------------------------------------------------------------------===//
// X86-32 ABI Implementation
//===----------------------------------------------------------------------===//

/// \brief Similar to llvm::CCState, but for Clang.
struct CCState {
  CCState(unsigned CC) : CC(CC), FreeRegs(0), FreeSSERegs(0) {}

  unsigned CC;
  unsigned FreeRegs;
  unsigned FreeSSERegs;
};
/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public ABIInfo {
  enum Class {
    Integer,
    Float
  };

  static const unsigned MinABIStackAlignInBytes = 4;

  bool IsDarwinVectorABI;
  bool IsRetSmallStructInRegABI;
  bool IsWin32StructABI;
  bool IsSoftFloatABI;
  bool IsMCUABI;
  unsigned DefaultNumRegisterParameters;

  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }

  bool isHomogeneousAggregateBaseType(QualType Ty) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorTypeForVectorCall(getContext(), Ty);
  }

  bool isHomogeneousAggregateSmallEnough(const Type *Ty,
                                         uint64_t NumMembers) const override {
    // FIXME: Assumes vectorcall is in use.
    return isX86VectorCallAggregateSmallEnough(NumMembers);
  }

  bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context) const;

  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;

  ABIArgInfo getIndirectReturnResult(QualType Ty, CCState &State) const;

  /// \brief Return the alignment to use for the given type on the stack.
  unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;

  Class classify(QualType Ty) const;
  ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const;

  /// \brief Updates the number of available free registers, returns
  /// true if any registers were allocated.
  bool updateFreeRegs(QualType Ty, CCState &State) const;

  bool shouldAggregateUseDirect(QualType Ty, CCState &State, bool &InReg,
                                bool &NeedsPadding) const;
  bool shouldPrimitiveUseInReg(QualType Ty, CCState &State) const;

  /// \brief Rewrite the function info so that all memory arguments use
  /// inalloca.
  void rewriteWithInAlloca(CGFunctionInfo &FI) const;

  void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
                           CharUnits &StackOffset, ABIArgInfo &Info,
                           QualType Type) const;

public:

  void computeInfo(CGFunctionInfo &FI) const override;
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;

  X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
                bool RetSmallStructInRegABI, bool Win32StructABI,
                unsigned NumRegisterParameters, bool SoftFloatABI)
    : ABIInfo(CGT), IsDarwinVectorABI(DarwinVectorABI),
      IsRetSmallStructInRegABI(RetSmallStructInRegABI),
      IsWin32StructABI(Win32StructABI),
      IsSoftFloatABI(SoftFloatABI),
      IsMCUABI(CGT.getTarget().getTriple().isOSIAMCU()),
      DefaultNumRegisterParameters(NumRegisterParameters) {}
};
class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool DarwinVectorABI,
                          bool RetSmallStructInRegABI, bool Win32StructABI,
                          unsigned NumRegisterParameters, bool SoftFloatABI)
      : TargetCodeGenInfo(new X86_32ABIInfo(
            CGT, DarwinVectorABI, RetSmallStructInRegABI, Win32StructABI,
            NumRegisterParameters, SoftFloatABI)) {}

  static bool isStructReturnInRegABI(
      const llvm::Triple &Triple, const CodeGenOptions &Opts);

  void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    // Darwin uses different dwarf register numbers for EH.
    if (CGM.getTarget().getTriple().isOSDarwin()) return 5;
    return 4;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  void addReturnRegisterOutputs(CodeGenFunction &CGF, LValue ReturnValue,
                                std::string &Constraints,
                                std::vector<llvm::Type *> &ResultRegTypes,
                                std::vector<llvm::Type *> &ResultTruncRegTypes,
                                std::vector<LValue> &ResultRegDests,
                                std::string &AsmString,
                                unsigned NumOutputs) const override;

  llvm::Constant *
  getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
    unsigned Sig = (0xeb << 0) |  // jmp rel8
                   (0x06 << 8) |  //           .+0x08
                   ('F' << 16) |
                   ('T' << 24);
    return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
  }
};
/// Rewrite input constraint references after adding some output constraints.
/// In the case where there is one output and one input and we add one output,
/// we need to replace all operand references greater than or equal to 1:
///     map($0 -> $0), map($1 -> $2)
/// The result will be:
///     $0 is unchanged, and what used to be written $1 is now written $2.
static void rewriteInputConstraintReferences(unsigned FirstIn,
                                             unsigned NumNewOuts,
                                             std::string &AsmString) {
  std::string Buf;
  llvm::raw_string_ostream OS(Buf);
  size_t Pos = 0;
  while (Pos < AsmString.size()) {
    size_t DollarStart = AsmString.find('$', Pos);
    if (DollarStart == std::string::npos)
      DollarStart = AsmString.size();
    size_t DollarEnd = AsmString.find_first_not_of('$', DollarStart);
    if (DollarEnd == std::string::npos)
      DollarEnd = AsmString.size();
    OS << StringRef(&AsmString[Pos], DollarEnd - Pos);
    Pos = DollarEnd;
    size_t NumDollars = DollarEnd - DollarStart;
    if (NumDollars % 2 != 0 && Pos < AsmString.size()) {
      // We have an operand reference.
      size_t DigitStart = Pos;
      size_t DigitEnd = AsmString.find_first_not_of("0123456789", DigitStart);
      if (DigitEnd == std::string::npos)
        DigitEnd = AsmString.size();
      StringRef OperandStr(&AsmString[DigitStart], DigitEnd - DigitStart);
      unsigned OperandIndex;
      if (!OperandStr.getAsInteger(10, OperandIndex)) {
        if (OperandIndex >= FirstIn)
          OperandIndex += NumNewOuts;
        OS << OperandIndex;
      } else {
        OS << OperandStr;
      }
      Pos = DigitEnd;
    }
  }
  AsmString = std::move(OS.str());
}
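
// Sketch of the behavior: with FirstIn = 1 and NumNewOuts = 1,
// "mov $1, $0" becomes "mov $2, $0". An even run of dollars ("$$") is an
// escaped literal '$' in inline asm, which is why only odd-length runs are
// treated as operand references.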
/// Add output constraints for EAX:EDX because they are return registers.
void X86_32TargetCodeGenInfo::addReturnRegisterOutputs(
    CodeGenFunction &CGF, LValue ReturnSlot, std::string &Constraints,
    std::vector<llvm::Type *> &ResultRegTypes,
    std::vector<llvm::Type *> &ResultTruncRegTypes,
    std::vector<LValue> &ResultRegDests, std::string &AsmString,
    unsigned NumOutputs) const {
  uint64_t RetWidth = CGF.getContext().getTypeSize(ReturnSlot.getType());

  // Use the EAX constraint if the width is 32 or smaller and EAX:EDX if it is
  // larger.
  if (!Constraints.empty())
    Constraints += ',';
  if (RetWidth <= 32) {
    Constraints += "={eax}";
    ResultRegTypes.push_back(CGF.Int32Ty);
  } else {
    // Use the 'A' constraint for EAX:EDX.
    Constraints += "=A";
    ResultRegTypes.push_back(CGF.Int64Ty);
  }

  // Truncate EAX or EAX:EDX to an integer of the appropriate size.
  llvm::Type *CoerceTy = llvm::IntegerType::get(CGF.getLLVMContext(), RetWidth);
  ResultTruncRegTypes.push_back(CoerceTy);

  // Coerce the integer by bitcasting the return slot pointer.
  ReturnSlot.setAddress(CGF.Builder.CreateBitCast(ReturnSlot.getAddress(),
                                                  CoerceTy->getPointerTo()));
  ResultRegDests.push_back(ReturnSlot);

  rewriteInputConstraintReferences(NumOutputs, 1, AsmString);
}
/// shouldReturnTypeInRegister - Determine if the given type should be
/// returned in a register (for the Darwin and MCU ABI).
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
                                               ASTContext &Context) const {
  uint64_t Size = Context.getTypeSize(Ty);

  // For i386, the type must be register sized.
  // For the MCU ABI, it only needs to be <= 8 bytes.
  if ((IsMCUABI && Size > 64) || (!IsMCUABI && !isRegisterSize(Size)))
    return false;

  if (Ty->isVectorType()) {
    // 64- and 128- bit vectors inside structures are not returned in
    // registers.
    if (Size == 64 || Size == 128)
      return false;

    return true;
  }

  // If this is a builtin, pointer, enum, complex type, member pointer, or
  // member function pointer it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
      Ty->isAnyComplexType() || Ty->isEnumeralType() ||
      Ty->isBlockPointerType() || Ty->isMemberPointerType())
    return true;

  // Arrays are treated like records.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
    return shouldReturnTypeInRegister(AT->getElementType(), Context);

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // FIXME: Traverse bases here too.

  // Structure types are passed in register if all fields would be
  // passed in a register.
  for (const auto *FD : RT->getDecl()->fields()) {
    // Empty fields are ignored.
    if (isEmptyField(Context, FD, true))
      continue;

    // Check fields recursively.
    if (!shouldReturnTypeInRegister(FD->getType(), Context))
      return false;
  }
  return true;
}
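
// Concrete cases (hypothetical types, under -freg-struct-return or Darwin's
// default): 'struct { short a, b; }' is 32 bits and every field passes the
// recursive check, so it comes back in EAX; 'struct { char c[3]; }' is 24
// bits, not a register size on i386, so it is returned in memory instead.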
ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(QualType RetTy,
                                                  CCState &State) const {
  // If the return value is indirect, then the hidden argument is consuming one
  // integer register.
  if (State.FreeRegs) {
    --State.FreeRegs;
    if (!IsMCUABI)
      return getNaturalAlignIndirectInReg(RetTy);
  }
  return getNaturalAlignIndirect(RetTy, /*ByVal=*/false);
}
ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
                                             CCState &State) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  if (State.CC == llvm::CallingConv::X86_VectorCall &&
      isHomogeneousAggregate(RetTy, Base, NumElts)) {
    // The LLVM struct type for such an aggregate should lower properly.
    return ABIArgInfo::getDirect();
  }

  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // On Darwin, some vectors are returned in registers.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // 128-bit vectors are a special case; they are returned in
      // registers and we need to make sure to pick a type the LLVM
      // backend will like.
      if (Size == 128)
        return ABIArgInfo::getDirect(llvm::VectorType::get(
                  llvm::Type::getInt64Ty(getVMContext()), 2));

      // Always return in register if it fits in a general purpose
      // register, or if it is 64 bits and has a single element.
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));

      return getIndirectReturnResult(RetTy, State);
    }

    return ABIArgInfo::getDirect();
  }

  if (isAggregateTypeForABI(RetTy)) {
    if (const RecordType *RT = RetTy->getAs<RecordType>()) {
      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectReturnResult(RetTy, State);
    }

    // If specified, structs and unions are always indirect.
    if (!IsRetSmallStructInRegABI && !RetTy->isAnyComplexType())
      return getIndirectReturnResult(RetTy, State);

    // Small structures which are register sized are generally returned
    // in a register.
    if (shouldReturnTypeInRegister(RetTy, getContext())) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // As a special-case, if the struct is a "single-element" struct, and
      // the field is of type "float" or "double", return it in a
      // floating-point register. (MSVC does not apply this special case.)
      // We apply a similar transformation for pointer types to improve the
      // quality of the generated IR.
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
            || SeltTy->hasPointerRepresentation())
          return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

      // FIXME: We should be able to narrow this integer in cases with dead
      // padding.
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size));
    }

    return getIndirectReturnResult(RetTy, State);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
static bool isSSEVectorType(ASTContext &Context, QualType Ty) {
  return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
}

static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())
      if (!isRecordWithSSEVectorType(Context, I.getType()))
        continue;

  for (const auto *i : RD->fields()) {
    QualType FT = i->getType();

    if (isSSEVectorType(Context, FT))
      return true;

    if (isRecordWithSSEVectorType(Context, FT))
      return true;
  }

  return false;
}
unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
                                                 unsigned Align) const {
  // Otherwise, if the alignment is less than or equal to the minimum ABI
  // alignment, just use the default; the backend will handle this.
  if (Align <= MinABIStackAlignInBytes)
    return 0; // Use default alignment.

  // On non-Darwin, the stack type alignment is always 4.
  if (!IsDarwinVectorABI) {
    // Set explicit alignment, since we may need to realign the top.
    return MinABIStackAlignInBytes;
  }

  // Otherwise, if the type contains an SSE vector type, the alignment is 16.
  if (Align >= 16 && (isSSEVectorType(getContext(), Ty) ||
                      isRecordWithSSEVectorType(getContext(), Ty)))
    return 16;

  return MinABIStackAlignInBytes;
}
ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
                                            CCState &State) const {
  if (!ByVal) {
    if (State.FreeRegs) {
      --State.FreeRegs; // Non-byval indirects just use one pointer.
      if (!IsMCUABI)
        return getNaturalAlignIndirectInReg(Ty);
    }
    return getNaturalAlignIndirect(Ty, false);
  }

  // Compute the byval alignment.
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
  if (StackAlign == 0)
    return ABIArgInfo::getIndirect(CharUnits::fromQuantity(4), /*ByVal=*/true);

  // If the stack alignment is less than the type alignment, realign the
  // argument.
  bool Realign = TypeAlign > StackAlign;
  return ABIArgInfo::getIndirect(CharUnits::fromQuantity(StackAlign),
                                 /*ByVal=*/true, Realign);
}
X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
  const Type *T = isSingleElementStruct(Ty, getContext());
  if (!T)
    T = Ty.getTypePtr();

  if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
    BuiltinType::Kind K = BT->getKind();
    if (K == BuiltinType::Float || K == BuiltinType::Double)
      return Float;
  }

  return Integer;
}
bool X86_32ABIInfo::updateFreeRegs(QualType Ty, CCState &State) const {
  if (!IsSoftFloatABI) {
    Class C = classify(Ty);
    if (C == Float)
      return false;
  }

  unsigned Size = getContext().getTypeSize(Ty);
  unsigned SizeInRegs = (Size + 31) / 32;

  if (SizeInRegs == 0)
    return false;

  if (!IsMCUABI) {
    if (SizeInRegs > State.FreeRegs) {
      State.FreeRegs = 0;
      return false;
    }
  } else {
    // The MCU psABI allows passing parameters in-reg even if there are
    // earlier parameters that are passed on the stack. Also,
    // it does not allow passing >8-byte structs in-register,
    // even if there are 3 free registers available.
    if (SizeInRegs > State.FreeRegs || SizeInRegs > 2)
      return false;
  }

  State.FreeRegs -= SizeInRegs;
  return true;
}
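
// The register-count arithmetic: a type of Size bits needs
// (Size + 31) / 32 DWORD registers, so a 12-byte (96-bit) struct consumes 3.
// Under regparm(3) that exhausts all three parameter registers in one go;
// under the MCU psABI the same struct is rejected outright by the
// SizeInRegs > 2 cap above.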
bool X86_32ABIInfo::shouldAggregateUseDirect(QualType Ty, CCState &State,
                                             bool &InReg,
                                             bool &NeedsPadding) const {
  NeedsPadding = false;
  InReg = !IsMCUABI;

  if (!updateFreeRegs(Ty, State))
    return false;

  if (IsMCUABI)
    return true;

  if (State.CC == llvm::CallingConv::X86_FastCall ||
      State.CC == llvm::CallingConv::X86_VectorCall) {
    if (getContext().getTypeSize(Ty) <= 32 && State.FreeRegs)
      NeedsPadding = true;

    return false;
  }

  return true;
}

bool X86_32ABIInfo::shouldPrimitiveUseInReg(QualType Ty, CCState &State) const {
  if (!updateFreeRegs(Ty, State))
    return false;

  if (IsMCUABI)
    return true;

  if (State.CC == llvm::CallingConv::X86_FastCall ||
      State.CC == llvm::CallingConv::X86_VectorCall) {
    if (getContext().getTypeSize(Ty) > 32)
      return false;

    return (Ty->isIntegralOrEnumerationType() || Ty->isPointerType() ||
            Ty->isReferenceType());
  }

  return true;
}
ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
                                               CCState &State) const {
  // FIXME: Set alignment on indirect arguments.

  Ty = useFirstFieldIfTransparentUnion(Ty);

  // Check with the C++ ABI first.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (RT) {
    CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
    if (RAA == CGCXXABI::RAA_Indirect) {
      return getIndirectResult(Ty, false, State);
    } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
      // The field index doesn't matter, we'll fix it up later.
      return ABIArgInfo::getInAlloca(/*FieldIndex=*/0);
    }
  }

  // vectorcall adds the concept of a homogeneous vector aggregate, similar
  // to other targets.
  const Type *Base = nullptr;
  uint64_t NumElts = 0;
  if (State.CC == llvm::CallingConv::X86_VectorCall &&
      isHomogeneousAggregate(Ty, Base, NumElts)) {
    if (State.FreeSSERegs >= NumElts) {
      State.FreeSSERegs -= NumElts;
      if (Ty->isBuiltinType() || Ty->isVectorType())
        return ABIArgInfo::getDirect();
      return ABIArgInfo::getExpand();
    }
    return getIndirectResult(Ty, /*ByVal=*/false, State);
  }

  if (isAggregateTypeForABI(Ty)) {
    if (RT) {
      // Structs are always byval on win32, regardless of what they contain.
      if (IsWin32StructABI)
        return getIndirectResult(Ty, true, State);

      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectResult(Ty, true, State);
    }

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();

    llvm::LLVMContext &LLVMContext = getVMContext();
    llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
    bool NeedsPadding, InReg;
    if (shouldAggregateUseDirect(Ty, State, InReg, NeedsPadding)) {
      unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
      SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32);
      llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
      if (InReg)
        return ABIArgInfo::getDirectInReg(Result);
      else
        return ABIArgInfo::getDirect(Result);
    }
    llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;

    // Expand small (<= 128-bit) record types when we know that the stack layout
    // of those arguments will match the struct. This is important because the
    // LLVM backend isn't smart enough to remove byval, which inhibits many
    // optimizations.
    // Don't do this for the MCU if there are still free integer registers
    // (see X86_64 ABI for full explanation).
    if (getContext().getTypeSize(Ty) <= 4*32 &&
        canExpandIndirectArgument(Ty, getContext()) &&
        (!IsMCUABI || State.FreeRegs == 0))
      return ABIArgInfo::getExpandWithPadding(
          State.CC == llvm::CallingConv::X86_FastCall ||
              State.CC == llvm::CallingConv::X86_VectorCall,
          PaddingType);

    return getIndirectResult(Ty, true, State);
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // On Darwin, some vectors are passed in memory, we handle this by passing
    // it as an i8/i16/i32/i64.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(Ty);
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));
    }

    if (IsX86_MMXType(CGT.ConvertType(Ty)))
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64));

    return ABIArgInfo::getDirect();
  }

  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  bool InReg = shouldPrimitiveUseInReg(Ty, State);

  if (Ty->isPromotableIntegerType()) {
    if (InReg)
      return ABIArgInfo::getExtendInReg();
    return ABIArgInfo::getExtend();
  }

  if (InReg)
    return ABIArgInfo::getDirectInReg();
  return ABIArgInfo::getDirect();
}
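
// End-to-end examples of the logic above (hypothetical declarations):
//   void f(short s);         // promotable integer: getExtend()
//   __attribute__((regparm(3))) void g(int a, int b);
//                            // both fit in registers: getDirectInReg()
//   a two-int struct (64 bits) with no free registers is expandable, so it
//   is broken into two separate i32 arguments rather than passed byval.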
void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  CCState State(FI.getCallingConvention());
  if (IsMCUABI)
    State.FreeRegs = 3;
  else if (State.CC == llvm::CallingConv::X86_FastCall)
    State.FreeRegs = 2;
  else if (State.CC == llvm::CallingConv::X86_VectorCall) {
    State.FreeRegs = 2;
    State.FreeSSERegs = 6;
  } else if (FI.getHasRegParm())
    State.FreeRegs = FI.getRegParm();
  else
    State.FreeRegs = DefaultNumRegisterParameters;

  if (!getCXXABI().classifyReturnType(FI)) {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), State);
  } else if (FI.getReturnInfo().isIndirect()) {
    // The C++ ABI is not aware of register usage, so we have to check if the
    // return value was sret and put it in a register ourselves if appropriate.
    if (State.FreeRegs) {
      --State.FreeRegs; // The sret parameter consumes a register.
      if (!IsMCUABI)
        FI.getReturnInfo().setInReg(true);
    }
  }

  // The chain argument effectively gives us another free register.
  if (FI.isChainCall())
    ++State.FreeRegs;

  bool UsedInAlloca = false;
  for (auto &I : FI.arguments()) {
    I.info = classifyArgumentType(I.type, State);
    UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca);
  }

  // If we needed to use inalloca for any argument, do a second pass and
  // rewrite all the memory arguments to use inalloca.
  if (UsedInAlloca)
    rewriteWithInAlloca(FI);
}
void
X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
                                   CharUnits &StackOffset, ABIArgInfo &Info,
                                   QualType Type) const {
  // Arguments are always 4-byte-aligned.
  CharUnits FieldAlign = CharUnits::fromQuantity(4);

  assert(StackOffset.isMultipleOf(FieldAlign) && "unaligned inalloca struct");
  Info = ABIArgInfo::getInAlloca(FrameFields.size());
  FrameFields.push_back(CGT.ConvertTypeForMem(Type));
  StackOffset += getContext().getTypeSizeInChars(Type);

  // Insert padding bytes to respect alignment.
  CharUnits FieldEnd = StackOffset;
  StackOffset = FieldEnd.RoundUpToAlignment(FieldAlign);
  if (StackOffset != FieldEnd) {
    CharUnits NumBytes = StackOffset - FieldEnd;
    llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
    Ty = llvm::ArrayType::get(Ty, NumBytes.getQuantity());
    FrameFields.push_back(Ty);
  }
}
static bool isArgInAlloca(const ABIArgInfo &Info) {
  // Leave ignored and inreg arguments alone.
  switch (Info.getKind()) {
  case ABIArgInfo::InAlloca:
    return true;
  case ABIArgInfo::Indirect:
    assert(Info.getIndirectByVal());
    return true;
  case ABIArgInfo::Ignore:
    return false;
  case ABIArgInfo::Direct:
  case ABIArgInfo::Extend:
  case ABIArgInfo::Expand:
    if (Info.getInReg())
      return false;
    return true;
  }
  llvm_unreachable("invalid enum");
}
void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
  assert(IsWin32StructABI && "inalloca only supported on win32");

  // Build a packed struct type for all of the arguments in memory.
  SmallVector<llvm::Type *, 6> FrameFields;

  // The stack alignment is always 4.
  CharUnits StackAlign = CharUnits::fromQuantity(4);

  CharUnits StackOffset;
  CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end();

  // Put 'this' into the struct before 'sret', if necessary.
  bool IsThisCall =
      FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall;
  ABIArgInfo &Ret = FI.getReturnInfo();
  if (Ret.isIndirect() && Ret.isSRetAfterThis() && !IsThisCall &&
      isArgInAlloca(I->info)) {
    addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
    ++I;
  }

  // Put the sret parameter into the inalloca struct if it's in memory.
  if (Ret.isIndirect() && !Ret.getInReg()) {
    CanQualType PtrTy = getContext().getPointerType(FI.getReturnType());
    addFieldToArgStruct(FrameFields, StackOffset, Ret, PtrTy);
    // On Windows, the hidden sret parameter is always returned in eax.
    Ret.setInAllocaSRet(IsWin32StructABI);
  }

  // Skip the 'this' parameter in ecx.
  if (IsThisCall)
    ++I;

  // Put arguments passed in memory into the struct.
  for (; I != E; ++I) {
    if (isArgInAlloca(I->info))
      addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
  }

  FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
                                        /*isPacked=*/true),
                  StackAlign);
}
Address X86_32ABIInfo::EmitVAArg(CodeGenFunction &CGF,
                                 Address VAListAddr, QualType Ty) const {

  auto TypeInfo = getContext().getTypeInfoInChars(Ty);

  // x86-32 changes the alignment of certain arguments on the stack.
  //
  // Just messing with TypeInfo like this works because we never pass
  // anything indirectly.
  TypeInfo.second = CharUnits::fromQuantity(
      getTypeStackAlignInBytes(Ty, TypeInfo.second.getQuantity()));

  return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
                          TypeInfo, CharUnits::fromQuantity(4),
                          /*AllowHigherAlign*/ true);
}
bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
    const llvm::Triple &Triple, const CodeGenOptions &Opts) {
  assert(Triple.getArch() == llvm::Triple::x86);

  switch (Opts.getStructReturnConvention()) {
  case CodeGenOptions::SRCK_Default:
    break;
  case CodeGenOptions::SRCK_OnStack:  // -fpcc-struct-return
    return false;
  case CodeGenOptions::SRCK_InRegs:  // -freg-struct-return
    return true;
  }

  if (Triple.isOSDarwin() || Triple.isOSIAMCU())
    return true;

  switch (Triple.getOS()) {
  case llvm::Triple::DragonFly:
  case llvm::Triple::FreeBSD:
  case llvm::Triple::OpenBSD:
  case llvm::Triple::Bitrig:
  case llvm::Triple::Win32:
    return true;
  default:
    return false;
  }
}
void X86_32TargetCodeGenInfo::setTargetAttributes(const Decl *D,
                                                  llvm::GlobalValue *GV,
                                            CodeGen::CodeGenModule &CGM) const {
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
    if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
      // Get the LLVM function.
      llvm::Function *Fn = cast<llvm::Function>(GV);

      // Now add the 'alignstack' attribute with a value of 16.
      llvm::AttrBuilder B;
      B.addStackAlignmentAttr(16);
      Fn->addAttributes(llvm::AttributeSet::FunctionIndex,
                        llvm::AttributeSet::get(CGM.getLLVMContext(),
                                                llvm::AttributeSet::FunctionIndex,
                                                B));
    }
  }
}
bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
    CodeGen::CodeGenFunction &CGF,
    llvm::Value *Address) const {
  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

  // 0-7 are the eight integer registers; the order is different
  // on Darwin (for EH), but the range is the same.
  // 8 is %eip.
  AssignToArrayRange(Builder, Address, Four8, 0, 8);

  if (CGF.CGM.getTarget().getTriple().isOSDarwin()) {
    // 12-16 are st(0..4). Not sure why we stop at 4.
    // These have size 16, which is sizeof(long double) on
    // platforms with 8-byte alignment for that type.
    llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
    AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);
  } else {
    // 9 is %eflags, which doesn't get a size on Darwin for some
    // reason.
    Builder.CreateAlignedStore(
        Four8, Builder.CreateConstInBoundsGEP1_32(CGF.Int8Ty, Address, 9),
        CharUnits::One());

    // 11-16 are st(0..5). Not sure why we stop at 5.
    // These have size 12, which is sizeof(long double) on
    // platforms with 4-byte alignment for that type.
    llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
    AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
  }

  return false;
}
//===----------------------------------------------------------------------===//
// X86-64 ABI Implementation
//===----------------------------------------------------------------------===//

/// The AVX ABI level for X86 targets.
enum class X86AVXABILevel {
  None,
  AVX,
  AVX512
};

/// \p returns the size in bits of the largest (native) vector for \p AVXLevel.
static unsigned getNativeVectorSizeForAVXABI(X86AVXABILevel AVXLevel) {
  switch (AVXLevel) {
  case X86AVXABILevel::AVX512:
    return 512;
  case X86AVXABILevel::AVX:
    return 256;
  case X86AVXABILevel::None:
    return 128;
  }
  llvm_unreachable("Unknown AVXLevel");
}
/// X86_64ABIInfo - The X86_64 ABI information.
class X86_64ABIInfo : public ABIInfo {
  enum Class {
    Integer = 0,
    SSE,
    SSEUp,
    X87,
    X87Up,
    ComplexX87,
    NoClass,
    Memory
  };

  /// merge - Implement the X86_64 ABI merging algorithm.
  ///
  /// Merge an accumulating classification \arg Accum with a field
  /// classification \arg Field.
  ///
  /// \param Accum - The accumulating classification. This should
  /// always be either NoClass or the result of a previous merge
  /// call. In addition, this should never be Memory (the caller
  /// should just return Memory for the aggregate).
  static Class merge(Class Accum, Class Field);

  /// postMerge - Implement the X86_64 ABI post merging algorithm.
  ///
  /// Post merger cleanup, reduces a malformed Hi and Lo pair to
  /// final MEMORY or SSE classes when necessary.
  ///
  /// \param AggregateSize - The size of the current aggregate in
  /// the classification process.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the higher words of the containing object.
  void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;

  /// classify - Determine the x86_64 register classes in which the
  /// given type T should be passed.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the high word of the containing object.
  ///
  /// \param OffsetBase - The bit offset of this type in the
  /// containing object. Some parameters are classified differently
  /// depending on whether they straddle an eightbyte boundary.
  ///
  /// \param isNamedArg - Whether the argument in question is a "named"
  /// argument, as used in AMD64-ABI 3.5.7.
  ///
  /// If a word is unused its result will be NoClass; if a type should
  /// be passed in Memory then at least the classification of \arg Lo
  /// will be Memory.
  ///
  /// The \arg Lo class will be NoClass iff the argument is ignored.
  ///
  /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
  /// also be ComplexX87.
  void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
                bool isNamedArg) const;
  llvm::Type *GetByteVectorType(QualType Ty) const;
  llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
                                 unsigned IROffset, QualType SourceTy,
                                 unsigned SourceOffset) const;
  llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
                                     unsigned IROffset, QualType SourceTy,
                                     unsigned SourceOffset) const;

  /// getIndirectReturnResult - Given a source type \arg Ty, return a suitable
  /// result such that the argument will be returned in memory.
  ABIArgInfo getIndirectReturnResult(QualType Ty) const;

  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ///
  /// \param freeIntRegs - The number of free integer registers remaining
  /// available.
  ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;

  ABIArgInfo classifyArgumentType(QualType Ty,
                                  unsigned freeIntRegs,
                                  unsigned &neededInt,
                                  unsigned &neededSSE,
                                  bool isNamedArg) const;

  bool IsIllegalVectorType(QualType Ty) const;

  /// The 0.98 ABI revision clarified a lot of ambiguities,
  /// unfortunately in ways that were not always consistent with
  /// certain previous compilers. In particular, platforms which
  /// required strict binary compatibility with older versions of GCC
  /// may need to exempt themselves.
  bool honorsRevision0_98() const {
    return !getTarget().getTriple().isOSDarwin();
  }

  X86AVXABILevel AVXLevel;
  // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on
  // 64-bit hardware.
  bool Has64BitPointers;

public:
1788 X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel) :
1789 ABIInfo(CGT), AVXLevel(AVXLevel),
1790 Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
1793 bool isPassedUsingAVXType(QualType type) const {
1794 unsigned neededInt, neededSSE;
1795 // The freeIntRegs argument doesn't matter here.
1796 ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE,
1797 /*isNamedArg*/true);
1798 if (info.isDirect()) {
1799 llvm::Type *ty = info.getCoerceToType();
1800 if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
1801 return (vectorTy->getBitWidth() > 128);
1802 }
1803 return false;
1804 }
1806 void computeInfo(CGFunctionInfo &FI) const override;
1808 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
1809 QualType Ty) const override;
1810 Address EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
1811 QualType Ty) const override;
1813 bool has64BitPointers() const {
1814 return Has64BitPointers;
1818 /// WinX86_64ABIInfo - The Windows X86_64 ABI information.
1819 class WinX86_64ABIInfo : public ABIInfo {
1820 public:
1821 WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT)
1822 : ABIInfo(CGT),
1823 IsMingw64(getTarget().getTriple().isWindowsGNUEnvironment()) {}
1825 void computeInfo(CGFunctionInfo &FI) const override;
1827 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
1828 QualType Ty) const override;
1830 bool isHomogeneousAggregateBaseType(QualType Ty) const override {
1831 // FIXME: Assumes vectorcall is in use.
1832 return isX86VectorTypeForVectorCall(getContext(), Ty);
1835 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
1836 uint64_t NumMembers) const override {
1837 // FIXME: Assumes vectorcall is in use.
1838 return isX86VectorCallAggregateSmallEnough(NumMembers);
1842 ABIArgInfo classify(QualType Ty, unsigned &FreeSSERegs,
1843 bool IsReturnType) const;
1848 class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
1850 X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
1851 : TargetCodeGenInfo(new X86_64ABIInfo(CGT, AVXLevel)) {}
1853 const X86_64ABIInfo &getABIInfo() const {
1854 return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
1857 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
1858 return 7;
1859 }
1861 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
1862 llvm::Value *Address) const override {
1863 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
1865 // 0-15 are the 16 integer registers.
1866 // 16 is %rip.
1867 AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
1868 return false;
1869 }
1871 llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
1872 StringRef Constraint,
1873 llvm::Type* Ty) const override {
1874 return X86AdjustInlineAsmType(CGF, Constraint, Ty);
1877 bool isNoProtoCallVariadic(const CallArgList &args,
1878 const FunctionNoProtoType *fnType) const override {
1879 // The default CC on x86-64 sets %al to the number of SSE
1880 // registers used, and GCC sets this when calling an unprototyped
1881 // function, so we override the default behavior. However, don't do
1882 // that when AVX types are involved: the ABI explicitly states it is
1883 // undefined, and it doesn't work in practice because of how the ABI
1884 // defines varargs anyway.
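// Illustrative sketch (editorial addition, not part of the original source):
//   void f();   /* unprototyped */
//   f(2.5);     /* the double lands in %xmm0, so GCC sets %al = 1 */
// A callee compiled as variadic can then consult %al to decide how many
// vector registers it must spill.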
1885 if (fnType->getCallConv() == CC_C) {
1886 bool HasAVXType = false;
1887 for (CallArgList::const_iterator
1888 it = args.begin(), ie = args.end(); it != ie; ++it) {
1889 if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
1890 HasAVXType = true;
1891 break;
1892 }
1893 }
1895 if (!HasAVXType)
1896 return true;
1897 }
1899 return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
1900 }
1902 llvm::Constant *
1903 getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
1904 unsigned Sig;
1905 if (getABIInfo().has64BitPointers())
1906 Sig = (0xeb << 0) | // jmp rel8
1907 (0x0a << 8) | // .+0x0c
1908 ('F' << 16) |
1909 ('T' << 24);
1910 else
1911 Sig = (0xeb << 0) | // jmp rel8
1912 (0x06 << 8) | // .+0x08
1913 ('F' << 16) |
1914 ('T' << 24);
1915 return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
1916 }
1919 class PS4TargetCodeGenInfo : public X86_64TargetCodeGenInfo {
1921 PS4TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, X86AVXABILevel AVXLevel)
1922 : X86_64TargetCodeGenInfo(CGT, AVXLevel) {}
1924 void getDependentLibraryOption(llvm::StringRef Lib,
1925 llvm::SmallString<24> &Opt) const override {
1926 Opt = "\01";
1927 // If the argument contains a space, enclose it in quotes.
1928 if (Lib.find(" ") != StringRef::npos)
1929 Opt += "\"" + Lib.str() + "\"";
1930 else
1931 Opt += Lib;
1932 }
1935 static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
1936 // If the argument does not end in .lib, automatically add the suffix.
1937 // If the argument contains a space, enclose it in quotes.
1938 // This matches the behavior of MSVC.
1939 bool Quote = (Lib.find(" ") != StringRef::npos);
1940 std::string ArgStr = Quote ? "\"" : "";
1941 ArgStr += Lib;
1942 if (!Lib.endswith_lower(".lib"))
1943 ArgStr += ".lib";
1944 ArgStr += Quote ? "\"" : "";
1945 return ArgStr;
1946 }
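// Worked examples (editorial addition): qualifyWindowsLibrary("msvcrt")
// produces "msvcrt.lib", while qualifyWindowsLibrary("my lib") produces
// "\"my lib.lib\"", mirroring MSVC's quoting of names containing spaces.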
1948 class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo {
1950 WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
1951 bool DarwinVectorABI, bool RetSmallStructInRegABI, bool Win32StructABI,
1952 unsigned NumRegisterParameters)
1953 : X86_32TargetCodeGenInfo(CGT, DarwinVectorABI, RetSmallStructInRegABI,
1954 Win32StructABI, NumRegisterParameters, false) {}
1956 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
1957 CodeGen::CodeGenModule &CGM) const override;
1959 void getDependentLibraryOption(llvm::StringRef Lib,
1960 llvm::SmallString<24> &Opt) const override {
1961 Opt = "/DEFAULTLIB:";
1962 Opt += qualifyWindowsLibrary(Lib);
1965 void getDetectMismatchOption(llvm::StringRef Name,
1966 llvm::StringRef Value,
1967 llvm::SmallString<32> &Opt) const override {
1968 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
1972 static void addStackProbeSizeTargetAttribute(const Decl *D,
1973 llvm::GlobalValue *GV,
1974 CodeGen::CodeGenModule &CGM) {
1975 if (D && isa<FunctionDecl>(D)) {
1976 if (CGM.getCodeGenOpts().StackProbeSize != 4096) {
1977 llvm::Function *Fn = cast<llvm::Function>(GV);
1979 Fn->addFnAttr("stack-probe-size",
1980 llvm::utostr(CGM.getCodeGenOpts().StackProbeSize));
1985 void WinX86_32TargetCodeGenInfo::setTargetAttributes(const Decl *D,
1986 llvm::GlobalValue *GV,
1987 CodeGen::CodeGenModule &CGM) const {
1988 X86_32TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
1990 addStackProbeSizeTargetAttribute(D, GV, CGM);
1993 class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
1995 WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
1996 X86AVXABILevel AVXLevel)
1997 : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {}
1999 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
2000 CodeGen::CodeGenModule &CGM) const override;
2002 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
2003 return 7;
2004 }
2006 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2007 llvm::Value *Address) const override {
2008 llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);
2010 // 0-15 are the 16 integer registers.
2011 // 16 is %rip.
2012 AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
2013 return false;
2014 }
2016 void getDependentLibraryOption(llvm::StringRef Lib,
2017 llvm::SmallString<24> &Opt) const override {
2018 Opt = "/DEFAULTLIB:";
2019 Opt += qualifyWindowsLibrary(Lib);
2022 void getDetectMismatchOption(llvm::StringRef Name,
2023 llvm::StringRef Value,
2024 llvm::SmallString<32> &Opt) const override {
2025 Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
2029 void WinX86_64TargetCodeGenInfo::setTargetAttributes(const Decl *D,
2030 llvm::GlobalValue *GV,
2031 CodeGen::CodeGenModule &CGM) const {
2032 TargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
2034 addStackProbeSizeTargetAttribute(D, GV, CGM);
2038 void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
2039 Class &Hi) const {
2040 // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
2042 // (a) If one of the classes is Memory, the whole argument is passed in
2045 // (b) If X87UP is not preceded by X87, the whole argument is passed in
2048 // (c) If the size of the aggregate exceeds two eightbytes and the first
2049 // eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
2050 // argument is passed in memory. NOTE: This is necessary to keep the
2051 // ABI working for processors that don't support the __m256 type.
2053 // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
2055 // Some of these are enforced by the merging logic. Others can arise
2056 // only with unions; for example:
2057 // union { _Complex double; unsigned; }
2059 // Note that clauses (b) and (c) were added in 0.98.
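// Worked example (editorial addition, not part of the original source):
//   struct S { double a, b, c, d; };   // 32 bytes
// merges to Lo = SSE, Hi = SSE, and clause (c) above then demotes Lo to
// Memory, because the aggregate exceeds two eightbytes without being a
// single SSE/SSEUP vector. A named __m256 argument, by contrast, is
// (SSE, SSEUp) and survives the cleanup in a YMM register.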
2061 if (Hi == Memory)
2062 Lo = Memory;
2063 if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
2064 Lo = Memory;
2065 if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
2066 Lo = Memory;
2067 if (Hi == SSEUp && Lo != SSE)
2068 Hi = SSE;
2069 }
2071 X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
2072 // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
2073 // classified recursively so that always two fields are
2074 // considered. The resulting class is calculated according to
2075 // the classes of the fields in the eightbyte:
2077 // (a) If both classes are equal, this is the resulting class.
2079 // (b) If one of the classes is NO_CLASS, the resulting class is
2082 // (c) If one of the classes is MEMORY, the result is the MEMORY
2085 // (d) If one of the classes is INTEGER, the result is the
2088 // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
2089 // MEMORY is used as class.
2091 // (f) Otherwise class SSE is used.
2093 // Accum should never be memory (we should have returned) or
2094 // ComplexX87 (because this cannot be passed in a structure).
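// Worked example (editorial addition): for
//   struct P { int i; float f; };
// the single eightbyte sees INTEGER (i) merged with SSE (f); rule (d)
// above wins, the class is INTEGER, and the pair travels in one GPR.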
2095 assert((Accum != Memory && Accum != ComplexX87) &&
2096 "Invalid accumulated classification during merge.");
2097 if (Accum == Field || Field == NoClass)
2098 return Accum;
2099 if (Field == Memory)
2100 return Memory;
2101 if (Accum == NoClass)
2102 return Field;
2103 if (Accum == Integer || Field == Integer)
2104 return Integer;
2105 if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
2106 Accum == X87 || Accum == X87Up)
2107 return Memory;
2108 return SSE;
2109 }
2111 void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
2112 Class &Lo, Class &Hi, bool isNamedArg) const {
2113 // FIXME: This code can be simplified by introducing a simple value class for
2114 // Class pairs with appropriate constructor methods for the various
2115 // situations.
2117 // FIXME: Some of the split computations are wrong; unaligned vectors
2118 // shouldn't be passed in registers for example, so there is no chance they
2119 // can straddle an eightbyte. Verify & simplify.
2121 Lo = Hi = NoClass;
2123 Class &Current = OffsetBase < 64 ? Lo : Hi;
2124 Current = Memory;
2126 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
2127 BuiltinType::Kind k = BT->getKind();
2129 if (k == BuiltinType::Void) {
2130 Current = NoClass;
2131 } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
2132 Lo = Integer;
2133 Hi = Integer;
2134 } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
2135 Current = Integer;
2136 } else if (k == BuiltinType::Float || k == BuiltinType::Double) {
2137 Current = SSE;
2138 } else if (k == BuiltinType::LongDouble) {
2139 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
2140 if (LDF == &llvm::APFloat::IEEEquad) {
2141 Lo = SSE;
2142 Hi = SSEUp;
2143 } else if (LDF == &llvm::APFloat::x87DoubleExtended) {
2144 Lo = X87;
2145 Hi = X87Up;
2146 } else if (LDF == &llvm::APFloat::IEEEdouble) {
2147 Current = SSE;
2148 } else
2149 llvm_unreachable("unexpected long double representation!");
2150 }
2151 // FIXME: _Decimal32 and _Decimal64 are SSE.
2152 // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
2153 return;
2154 }
2156 if (const EnumType *ET = Ty->getAs<EnumType>()) {
2157 // Classify the underlying integer type.
2158 classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
2159 return;
2160 }
2162 if (Ty->hasPointerRepresentation()) {
2163 Current = Integer;
2164 return;
2165 }
2167 if (Ty->isMemberPointerType()) {
2168 if (Ty->isMemberFunctionPointerType()) {
2169 if (Has64BitPointers) {
2170 // If Has64BitPointers, this is an {i64, i64}, so classify both
2171 // Lo and Hi now.
2172 Lo = Hi = Integer;
2173 } else {
2174 // Otherwise, with 32-bit pointers, this is an {i32, i32}. If that
2175 // straddles an eightbyte boundary, Hi should be classified as well.
2176 uint64_t EB_FuncPtr = (OffsetBase) / 64;
2177 uint64_t EB_ThisAdj = (OffsetBase + 64 - 1) / 64;
2178 if (EB_FuncPtr != EB_ThisAdj) {
2179 Lo = Hi = Integer;
2180 } else {
2181 Current = Integer;
2182 }
2183 }
2184 } else {
2185 Current = Integer;
2186 }
2187 return;
2188 }
2190 if (const VectorType *VT = Ty->getAs<VectorType>()) {
2191 uint64_t Size = getContext().getTypeSize(VT);
2192 if (Size == 1 || Size == 8 || Size == 16 || Size == 32) {
2193 // gcc passes the following as integer:
2194 // 4 bytes - <4 x char>, <2 x short>, <1 x int>, <1 x float>
2195 // 2 bytes - <2 x char>, <1 x short>
2196 // 1 byte - <1 x char>
2197 Current = Integer;
2199 // If this type crosses an eightbyte boundary, it should be
2200 // split.
2201 uint64_t EB_Lo = (OffsetBase) / 64;
2202 uint64_t EB_Hi = (OffsetBase + Size - 1) / 64;
2203 if (EB_Lo != EB_Hi)
2204 Hi = Lo;
2205 } else if (Size == 64) {
2206 // gcc passes <1 x double> in memory. :(
2207 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
2208 return;
2210 // gcc passes <1 x long long> as INTEGER.
2211 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong) ||
2212 VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULongLong) ||
2213 VT->getElementType()->isSpecificBuiltinType(BuiltinType::Long) ||
2214 VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULong))
2215 Current = Integer;
2216 else
2217 Current = SSE;
2219 // If this type crosses an eightbyte boundary, it should be
2220 // split.
2221 if (OffsetBase && OffsetBase != 64)
2222 Hi = Lo;
2223 } else if (Size == 128 ||
2224 (isNamedArg && Size <= getNativeVectorSizeForAVXABI(AVXLevel))) {
2225 // Arguments of 256-bits are split into four eightbyte chunks. The
2226 // least significant one belongs to class SSE and all the others to class
2227 // SSEUP. The original Lo and Hi design considers that types can't be
2228 // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense.
2229 // This design isn't correct for 256-bits, but since there are no cases
2230 // where the upper parts would need to be inspected, avoid adding
2231 // complexity and just consider Hi to match the 64-256 part.
2233 // Note that per 3.5.7 of AMD64-ABI, 256-bit args are only passed in
2234 // registers if they are "named", i.e. not part of the "..." of a
2235 // variadic function.
2237 // Similarly, per 3.2.3. of the AVX512 draft, 512-bits ("named") args are
2238 // split into eight eightbyte chunks, one SSE and seven SSEUP.
2239 Lo = SSE;
2240 Hi = SSEUp;
2241 }
2242 return;
2243 }
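// Illustrative summary (editorial addition): a named __m256 argument is
// classified Lo = SSE (bits 0-63) and Hi = SSEUp (the rest) and is passed
// in one YMM register; the same value in the "..." of a variadic call is
// unnamed and therefore ends up in memory.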
2245 if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
2246 QualType ET = getContext().getCanonicalType(CT->getElementType());
2248 uint64_t Size = getContext().getTypeSize(Ty);
2249 if (ET->isIntegralOrEnumerationType()) {
2250 if (Size <= 64)
2251 Current = Integer;
2252 else if (Size <= 128)
2253 Lo = Hi = Integer;
2254 } else if (ET == getContext().FloatTy) {
2255 Current = SSE;
2256 } else if (ET == getContext().DoubleTy) {
2257 Lo = Hi = SSE;
2258 } else if (ET == getContext().LongDoubleTy) {
2259 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
2260 if (LDF == &llvm::APFloat::IEEEquad)
2261 Current = Memory;
2262 else if (LDF == &llvm::APFloat::x87DoubleExtended)
2263 Current = ComplexX87;
2264 else if (LDF == &llvm::APFloat::IEEEdouble)
2265 Lo = Hi = SSE;
2266 else
2267 llvm_unreachable("unexpected long double representation!");
2268 }
2270 // If this complex type crosses an eightbyte boundary then it
2271 // should be split.
2272 uint64_t EB_Real = (OffsetBase) / 64;
2273 uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
2274 if (Hi == NoClass && EB_Real != EB_Imag)
2275 Hi = Lo;
2277 return;
2278 }
2280 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
2281 // Arrays are treated like structures.
2283 uint64_t Size = getContext().getTypeSize(Ty);
2285 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
2286 // than four eightbytes, ..., it has class MEMORY.
2287 if (Size > 256)
2288 return;
2290 // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
2291 // fields, it has class MEMORY.
2293 // Only need to check alignment of array base.
2294 if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
2295 return;
2297 // Otherwise implement simplified merge. We could be smarter about
2298 // this, but it isn't worth it and would be harder to verify.
2299 Current = NoClass;
2300 uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
2301 uint64_t ArraySize = AT->getSize().getZExtValue();
2303 // The only case a 256-bit wide vector could be used is when the array
2304 // contains a single 256-bit element. Since Lo and Hi logic isn't extended
2305 // to work for sizes wider than 128, early check and fallback to memory.
2306 if (Size > 128 && EltSize != 256)
2307 return;
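// For instance (editorial addition): double[4] is 256 bits of 64-bit
// elements, so it bails out here and lands in MEMORY after the caller's
// postMerge cleanup, whereas a one-element array of __m256
// (EltSize == 256) continues on to the per-element merge below.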
2309 for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
2310 Class FieldLo, FieldHi;
2311 classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);
2312 Lo = merge(Lo, FieldLo);
2313 Hi = merge(Hi, FieldHi);
2314 if (Lo == Memory || Hi == Memory)
2315 break;
2316 }
2318 postMerge(Size, Lo, Hi);
2319 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
2320 return;
2321 }
2323 if (const RecordType *RT = Ty->getAs<RecordType>()) {
2324 uint64_t Size = getContext().getTypeSize(Ty);
2326 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
2327 // than four eightbytes, ..., it has class MEMORY.
2328 if (Size > 256)
2329 return;
2331 // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
2332 // copy constructor or a non-trivial destructor, it is passed by invisible
2333 // reference.
2334 if (getRecordArgABI(RT, getCXXABI()))
2335 return;
2337 const RecordDecl *RD = RT->getDecl();
2339 // Assume variable sized types are passed in memory.
2340 if (RD->hasFlexibleArrayMember())
2341 return;
2343 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
2345 // Reset Lo class, this will be recomputed.
2346 Current = NoClass;
2348 // If this is a C++ record, classify the bases first.
2349 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
2350 for (const auto &I : CXXRD->bases()) {
2351 assert(!I.isVirtual() && !I.getType()->isDependentType() &&
2352 "Unexpected base class!");
2353 const CXXRecordDecl *Base =
2354 cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
2356 // Classify this field.
2358 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
2359 // single eightbyte, each is classified separately. Each eightbyte gets
2360 // initialized to class NO_CLASS.
2361 Class FieldLo, FieldHi;
2362 uint64_t Offset =
2363 OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
2364 classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg);
2365 Lo = merge(Lo, FieldLo);
2366 Hi = merge(Hi, FieldHi);
2367 if (Lo == Memory || Hi == Memory) {
2368 postMerge(Size, Lo, Hi);
2369 return;
2370 }
2371 }
2372 }
2374 // Classify the fields one at a time, merging the results.
2375 unsigned idx = 0;
2376 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
2377 i != e; ++i, ++idx) {
2378 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
2379 bool BitField = i->isBitField();
2381 // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
2382 // four eightbytes, or it contains unaligned fields, it has class MEMORY.
2384 // The only case a 256-bit wide vector could be used is when the struct
2385 // contains a single 256-bit element. Since Lo and Hi logic isn't extended
2386 // to work for sizes wider than 128, early check and fallback to memory.
2388 if (Size > 128 && getContext().getTypeSize(i->getType()) != 256) {
2389 Lo = Memory;
2390 postMerge(Size, Lo, Hi);
2391 return;
2392 }
2393 // Note, skip this test for bit-fields, see below.
2394 if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
2395 Lo = Memory;
2396 postMerge(Size, Lo, Hi);
2397 return;
2398 }
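// Example (editorial addition): in
//   struct __attribute__((packed)) S { char c; int i; };
// the int lies at bit offset 8, not a multiple of its 32-bit alignment,
// so this test classifies the whole struct as MEMORY.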
2400 // Classify this field.
2402 // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
2403 // exceeds a single eightbyte, each is classified
2404 // separately. Each eightbyte gets initialized to class
2406 Class FieldLo, FieldHi;
2408 // Bit-fields require special handling, they do not force the
2409 // structure to be passed in memory even if unaligned, and
2410 // therefore they can straddle an eightbyte.
2411 if (BitField) {
2412 // Ignore padding bit-fields.
2413 if (i->isUnnamedBitfield())
2414 continue;
2416 uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
2417 uint64_t Size = i->getBitWidthValue(getContext());
2419 uint64_t EB_Lo = Offset / 64;
2420 uint64_t EB_Hi = (Offset + Size - 1) / 64;
2422 if (EB_Lo) {
2423 assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
2424 FieldLo = NoClass;
2425 FieldHi = Integer;
2426 } else {
2427 FieldLo = Integer;
2428 FieldHi = EB_Hi ? Integer : NoClass;
2429 }
2430 } else
2431 classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
2432 Lo = merge(Lo, FieldLo);
2433 Hi = merge(Hi, FieldHi);
2434 if (Lo == Memory || Hi == Memory)
2435 break;
2436 }
2438 postMerge(Size, Lo, Hi);
2439 }
2440 }
2442 ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
2443 // If this is a scalar LLVM value then assume LLVM will pass it in the right
2444 // place naturally.
2445 if (!isAggregateTypeForABI(Ty)) {
2446 // Treat an enum type as its underlying type.
2447 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2448 Ty = EnumTy->getDecl()->getIntegerType();
2450 return (Ty->isPromotableIntegerType() ?
2451 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
2452 }
2454 return getNaturalAlignIndirect(Ty);
2455 }
2457 bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
2458 if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
2459 uint64_t Size = getContext().getTypeSize(VecTy);
2460 unsigned LargestVector = getNativeVectorSizeForAVXABI(AVXLevel);
2461 if (Size <= 64 || Size > LargestVector)
2462 return true;
2463 }
2465 return false;
2466 }
2468 ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
2469 unsigned freeIntRegs) const {
2470 // If this is a scalar LLVM value then assume LLVM will pass it in the right
2471 // place naturally.
2473 // This assumption is optimistic, as there could be free registers available
2474 // when we need to pass this argument in memory, and LLVM could try to pass
2475 // the argument in the free register. This does not seem to happen currently,
2476 // but this code would be much safer if we could mark the argument with
2477 // 'onstack'. See PR12193.
2478 if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) {
2479 // Treat an enum type as its underlying type.
2480 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2481 Ty = EnumTy->getDecl()->getIntegerType();
2483 return (Ty->isPromotableIntegerType() ?
2484 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
2487 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
2488 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
2490 // Compute the byval alignment. We specify the alignment of the byval in all
2491 // cases so that the mid-level optimizer knows the alignment of the byval.
2492 unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);
2494 // Attempt to avoid passing indirect results using byval when possible. This
2495 // is important for good codegen.
2497 // We do this by coercing the value into a scalar type which the backend can
2498 // handle naturally (i.e., without using byval).
2500 // For simplicity, we currently only do this when we have exhausted all of the
2501 // free integer registers. Doing this when there are free integer registers
2502 // would require more care, as we would have to ensure that the coerced value
2503 // did not claim the unused register. That would require either reordering the
2504 // arguments to the function (so that any subsequent inreg values came first),
2505 // or only doing this optimization when there were no following arguments that
2506 // might be in registers.
2508 // We currently expect it to be rare (particularly in well written code) for
2509 // arguments to be passed on the stack when there are still free integer
2510 // registers available (this would typically imply large structs being passed
2511 // by value), so this seems like a fair tradeoff for now.
2513 // We can revisit this if the backend grows support for 'onstack' parameter
2514 // attributes. See PR12193.
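// Example of the coercion below (editorial addition): with no integer
// registers left, an 8-byte struct { int a, b; } is passed directly as
// one i64 stack slot instead of as a byval pointer.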
2515 if (freeIntRegs == 0) {
2516 uint64_t Size = getContext().getTypeSize(Ty);
2518 // If this type fits in an eightbyte, coerce it into the matching integral
2519 // type, which will end up on the stack (with alignment 8).
2520 if (Align == 8 && Size <= 64)
2521 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
2522 Size));
2523 }
2525 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(Align));
2526 }
2528 /// The ABI specifies that a value should be passed in a full vector XMM/YMM
2529 /// register. Pick an LLVM IR type that will be passed as a vector register.
2530 llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
2531 // Wrapper structs/arrays that only contain vectors are passed just like
2532 // vectors; strip them off if present.
2533 if (const Type *InnerTy = isSingleElementStruct(Ty, getContext()))
2534 Ty = QualType(InnerTy, 0);
2536 llvm::Type *IRType = CGT.ConvertType(Ty);
2537 if (isa<llvm::VectorType>(IRType) ||
2538 IRType->getTypeID() == llvm::Type::FP128TyID)
2539 return IRType;
2541 // We couldn't find the preferred IR vector type for 'Ty'.
2542 uint64_t Size = getContext().getTypeSize(Ty);
2543 assert((Size == 128 || Size == 256) && "Invalid type found!");
2545 // Return a LLVM IR vector type based on the size of 'Ty'.
2546 return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()),
2547 Size / 64);
2548 }
2550 /// BitsContainNoUserData - Return true if the specified [start,end) bit range
2551 /// is known to either be off the end of the specified type or being in
2552 /// alignment padding. The user type specified is known to be at most 128 bits
2553 /// in size, and have passed through X86_64ABIInfo::classify with a successful
2554 /// classification that put one of the two halves in the INTEGER class.
2556 /// It is conservatively correct to return false.
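/// For example (editorial addition): struct { float x, y, z; } occupies
/// bits [0,96); a query for [96,128) returns true, which is what allows
/// the second eightbyte to be passed as float rather than double.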
2557 static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
2558 unsigned EndBit, ASTContext &Context) {
2559 // If the bytes being queried are off the end of the type, there is no user
2560 // data hiding here. This handles analysis of builtins, vectors and other
2561 // types that don't contain interesting padding.
2562 unsigned TySize = (unsigned)Context.getTypeSize(Ty);
2563 if (TySize <= StartBit)
2564 return true;
2566 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
2567 unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
2568 unsigned NumElts = (unsigned)AT->getSize().getZExtValue();
2570 // Check each element to see if the element overlaps with the queried range.
2571 for (unsigned i = 0; i != NumElts; ++i) {
2572 // If the element is after the span we care about, then we're done.
2573 unsigned EltOffset = i*EltSize;
2574 if (EltOffset >= EndBit) break;
2576 unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset :0;
2577 if (!BitsContainNoUserData(AT->getElementType(), EltStart,
2578 EndBit-EltOffset, Context))
2579 return false;
2580 }
2581 // If it overlaps no elements, then it is safe to process as padding.
2582 return true;
2583 }
2585 if (const RecordType *RT = Ty->getAs<RecordType>()) {
2586 const RecordDecl *RD = RT->getDecl();
2587 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
2589 // If this is a C++ record, check the bases first.
2590 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
2591 for (const auto &I : CXXRD->bases()) {
2592 assert(!I.isVirtual() && !I.getType()->isDependentType() &&
2593 "Unexpected base class!");
2594 const CXXRecordDecl *Base =
2595 cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
2597 // If the base is after the span we care about, ignore it.
2598 unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
2599 if (BaseOffset >= EndBit) continue;
2601 unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset :0;
2602 if (!BitsContainNoUserData(I.getType(), BaseStart,
2603 EndBit-BaseOffset, Context))
2604 return false;
2605 }
2606 }
2608 // Verify that no field has data that overlaps the region of interest. Yes
2609 // this could be sped up a lot by being smarter about queried fields,
2610 // however we're only looking at structs up to 16 bytes, so we don't care.
2612 unsigned idx = 0;
2613 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
2614 i != e; ++i, ++idx) {
2615 unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);
2617 // If we found a field after the region we care about, then we're done.
2618 if (FieldOffset >= EndBit) break;
2620 unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset :0;
2621 if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset,
2622 Context))
2623 return false;
2624 }
2626 // If nothing in this record overlapped the area of interest, then we're
2627 // reading padding.
2628 return true;
2629 }
2631 return false;
2632 }
2634 /// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a
2635 /// float member at the specified offset. For example, {int,{float}} has a
2636 /// float at offset 4. It is conservatively correct for this routine to return
2638 static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset,
2639 const llvm::DataLayout &TD) {
2640 // Base case if we find a float.
2641 if (IROffset == 0 && IRType->isFloatTy())
2642 return true;
2644 // If this is a struct, recurse into the field at the specified offset.
2645 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
2646 const llvm::StructLayout *SL = TD.getStructLayout(STy);
2647 unsigned Elt = SL->getElementContainingOffset(IROffset);
2648 IROffset -= SL->getElementOffset(Elt);
2649 return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD);
2652 // If this is an array, recurse into the field at the specified offset.
2653 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
2654 llvm::Type *EltTy = ATy->getElementType();
2655 unsigned EltSize = TD.getTypeAllocSize(EltTy);
2656 IROffset -= IROffset/EltSize*EltSize;
2657 return ContainsFloatAtOffset(EltTy, IROffset, TD);
2658 }
2660 return false;
2661 }
2664 /// GetSSETypeAtOffset - Return a type that will be passed by the backend in the
2665 /// low 8 bytes of an XMM register, corresponding to the SSE class.
2666 llvm::Type *X86_64ABIInfo::
2667 GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
2668 QualType SourceTy, unsigned SourceOffset) const {
2669 // The only three choices we have are either double, <2 x float>, or float. We
2670 // pass as float if the last 4 bytes are just padding. This happens for
2671 // structs that contain 3 floats.
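// e.g. (editorial addition): struct { float a, b, c; } yields <2 x float>
// for its first eightbyte (floats at offsets 0 and 4) and a lone float
// for the second, since bits [96,128) are pure padding.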
2672 if (BitsContainNoUserData(SourceTy, SourceOffset*8+32,
2673 SourceOffset*8+64, getContext()))
2674 return llvm::Type::getFloatTy(getVMContext());
2676 // We want to pass as <2 x float> if the LLVM IR type contains a float at
2677 // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the
2678 // case.
2679 if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) &&
2680 ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout()))
2681 return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2);
2683 return llvm::Type::getDoubleTy(getVMContext());
2687 /// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in
2688 /// an 8-byte GPR. This means that we either have a scalar or we are talking
2689 /// about the high or low part of an up-to-16-byte struct. This routine picks
2690 /// the best LLVM IR type to represent this, which may be i64 or may be anything
2691 /// else that the backend will pass in a GPR that works better (e.g. i8, %foo*,
2692 /// etc).
2694 /// PrefType is an LLVM IR type that corresponds to (part of) the IR type for
2695 /// the source type. IROffset is an offset in bytes into the LLVM IR type that
2696 /// the 8-byte value references. PrefType may be null.
2698 /// SourceTy is the source-level type for the entire argument. SourceOffset is
2699 /// an offset into this that we're processing (which is always either 0 or 8).
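/// For example (editorial addition): for struct { double d; int i; } the
/// high eightbyte is best represented as i32, because bits [96,128) of the
/// source type are tail padding; add a second int and it becomes i64.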
2701 llvm::Type *X86_64ABIInfo::
2702 GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
2703 QualType SourceTy, unsigned SourceOffset) const {
2704 // If we're dealing with an un-offset LLVM IR type, then it means that we're
2705 // returning an 8-byte unit starting with it. See if we can safely use it.
2706 if (IROffset == 0) {
2707 // Pointers and int64's always fill the 8-byte unit.
2708 if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
2709 IRType->isIntegerTy(64))
2710 return IRType;
2712 // If we have a 1/2/4-byte integer, we can use it only if the rest of the
2713 // goodness in the source type is just tail padding. This is allowed to
2714 // kick in for struct {double,int} on the int, but not on
2715 // struct{double,int,int} because we wouldn't return the second int. We
2716 // have to do this analysis on the source type because we can't depend on
2717 // unions being lowered a specific way etc.
2718 if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
2719 IRType->isIntegerTy(32) ||
2720 (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
2721 unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
2722 cast<llvm::IntegerType>(IRType)->getBitWidth();
2724 if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
2725 SourceOffset*8+64, getContext()))
2726 return IRType;
2727 }
2728 }
2730 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
2731 // If this is a struct, recurse into the field at the specified offset.
2732 const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
2733 if (IROffset < SL->getSizeInBytes()) {
2734 unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
2735 IROffset -= SL->getElementOffset(FieldIdx);
2737 return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
2738 SourceTy, SourceOffset);
2742 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
2743 llvm::Type *EltTy = ATy->getElementType();
2744 unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
2745 unsigned EltOffset = IROffset/EltSize*EltSize;
2746 return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
2747 SourceOffset);
2748 }
2750 // Okay, we don't have any better idea of what to pass, so we pass this in an
2751 // integer register that isn't too big to fit the rest of the struct.
2752 unsigned TySizeInBytes =
2753 (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
2755 assert(TySizeInBytes != SourceOffset && "Empty field?");
2757 // It is always safe to classify this as an integer type up to i64 that
2758 // isn't larger than the structure.
2759 return llvm::IntegerType::get(getVMContext(),
2760 std::min(TySizeInBytes-SourceOffset, 8U)*8);
2764 /// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
2765 /// be used as elements of a two register pair to pass or return, return a
2766 /// first class aggregate to represent them. For example, if the low part of
2767 /// a by-value argument should be passed as i32* and the high part as float,
2768 /// return {i32*, float}.
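/// Another illustrative case (editorial addition): for
/// struct { float f; double d; } the inferred pair is {float, double};
/// the float is widened to double below so that the high element lands at
/// offset 8, giving {double, double}.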
2769 static llvm::Type *
2770 GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
2771 const llvm::DataLayout &TD) {
2772 // In order to correctly satisfy the ABI, we need the high part to start
2773 // at offset 8. If the high and low parts we inferred are both 4-byte types
2774 // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
2775 // the second element at offset 8. Check for this:
2776 unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
2777 unsigned HiAlign = TD.getABITypeAlignment(Hi);
2778 unsigned HiStart = llvm::RoundUpToAlignment(LoSize, HiAlign);
2779 assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
2781 // To handle this, we have to increase the size of the low part so that the
2782 // second element will start at an 8 byte offset. We can't increase the size
2783 // of the second element because it might make us access off the end of the
2784 // struct.
2786 // There are usually two sorts of types the ABI generation code can produce
2787 // for the low part of a pair that aren't 8 bytes in size: float or
2788 // i8/i16/i32. This can also include pointers when they are 32-bit (X32 and
2789 // NaCl).
2790 // Promote these to a larger type.
2791 if (Lo->isFloatTy())
2792 Lo = llvm::Type::getDoubleTy(Lo->getContext());
2793 else {
2794 assert((Lo->isIntegerTy() || Lo->isPointerTy())
2795 && "Invalid/unknown lo type");
2796 Lo = llvm::Type::getInt64Ty(Lo->getContext());
2800 llvm::StructType *Result = llvm::StructType::get(Lo, Hi, nullptr);
2803 // Verify that the second element is at an 8-byte offset.
2804 assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
2805 "Invalid x86-64 argument pair!");
2809 ABIArgInfo X86_64ABIInfo::
2810 classifyReturnType(QualType RetTy) const {
2811 // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
2812 // classification algorithm.
2813 X86_64ABIInfo::Class Lo, Hi;
2814 classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true);
2816 // Check some invariants.
2817 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
2818 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
2820 llvm::Type *ResType = nullptr;
2821 switch (Lo) {
2822 case NoClass:
2823 if (Hi == NoClass)
2824 return ABIArgInfo::getIgnore();
2825 // If the low part is just padding, it takes no register, leave ResType
2826 // null.
2827 assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
2828 "Unknown missing lo part");
2829 break;
2831 case SSEUp:
2832 case X87Up:
2833 llvm_unreachable("Invalid classification for lo word.");
2835 // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
2836 // hidden argument.
2837 case Memory:
2838 return getIndirectReturnResult(RetTy);
2840 // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
2841 // available register of the sequence %rax, %rdx is used.
2842 case Integer:
2843 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
2845 // If we have a sign or zero extended integer, make sure to return Extend
2846 // so that the parameter gets the right LLVM IR attributes.
2847 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
2848 // Treat an enum type as its underlying type.
2849 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
2850 RetTy = EnumTy->getDecl()->getIntegerType();
2852 if (RetTy->isIntegralOrEnumerationType() &&
2853 RetTy->isPromotableIntegerType())
2854 return ABIArgInfo::getExtend();
2855 }
2856 break;
2858 // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
2859 // available SSE register of the sequence %xmm0, %xmm1 is used.
2860 case SSE:
2861 ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
2862 break;
2864 // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
2865 // returned on the X87 stack in %st0 as 80-bit x87 number.
2866 case X87:
2867 ResType = llvm::Type::getX86_FP80Ty(getVMContext());
2868 break;
2870 // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
2871 // part of the value is returned in %st0 and the imaginary part in
2872 // %st1.
2873 case ComplexX87:
2874 assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
2875 ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
2876 llvm::Type::getX86_FP80Ty(getVMContext()),
2877 nullptr);
2878 break;
2879 }
2881 llvm::Type *HighPart = nullptr;
2882 switch (Hi) {
2883 // Memory was handled previously and X87 should
2884 // never occur as a hi class.
2885 case Memory:
2886 case X87:
2887 llvm_unreachable("Invalid classification for hi word.");
2889 case ComplexX87: // Previously handled.
2890 case NoClass:
2891 break;
2893 case Integer:
2894 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
2895 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
2896 return ABIArgInfo::getDirect(HighPart, 8);
2897 break;
2898 case SSE:
2899 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
2900 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
2901 return ABIArgInfo::getDirect(HighPart, 8);
2902 break;
2904 // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
2905 // is passed in the next available eightbyte chunk of the last used
2906 // vector register.
2908 // SSEUP should always be preceded by SSE, just widen.
2909 case SSEUp:
2910 assert(Lo == SSE && "Unexpected SSEUp classification.");
2911 ResType = GetByteVectorType(RetTy);
2912 break;
2914 // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
2915 // returned together with the previous X87 value in %st0.
2916 case X87Up:
2917 // If X87Up is preceded by X87, we don't need to do
2918 // anything. However, in some cases with unions it may not be
2919 // preceded by X87. In such situations we follow gcc and pass the
2920 // extra bits in an SSE reg.
2921 if (Lo != X87) {
2922 HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
2923 if (Lo == NoClass) // Return HighPart at offset 8 in memory.
2924 return ABIArgInfo::getDirect(HighPart, 8);
2925 }
2926 break;
2927 }
2929 // If a high part was specified, merge it together with the low part. It is
2930 // known to pass in the high eightbyte of the result. We do this by forming a
2931 // first class struct aggregate with the high and low part: {low, high}
2932 if (HighPart)
2933 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
2935 return ABIArgInfo::getDirect(ResType);
2938 ABIArgInfo X86_64ABIInfo::classifyArgumentType(
2939 QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE,
2940 bool isNamedArg) const {
2943 Ty = useFirstFieldIfTransparentUnion(Ty);
2945 X86_64ABIInfo::Class Lo, Hi;
2946 classify(Ty, 0, Lo, Hi, isNamedArg);
2948 // Check some invariants.
2949 // FIXME: Enforce these by construction.
2950 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
2951 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
2953 neededInt = 0;
2954 neededSSE = 0;
2955 llvm::Type *ResType = nullptr;
2956 switch (Lo) {
2957 case NoClass:
2958 if (Hi == NoClass)
2959 return ABIArgInfo::getIgnore();
2960 // If the low part is just padding, it takes no register, leave ResType
2961 // null.
2962 assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
2963 "Unknown missing lo part");
2964 break;
2966 // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
2967 // on the stack.
2968 case Memory:
2970 // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
2971 // COMPLEX_X87, it is passed in memory.
2972 case X87:
2973 case ComplexX87:
2974 if (getRecordArgABI(Ty, getCXXABI()) == CGCXXABI::RAA_Indirect)
2975 ++neededInt;
2976 return getIndirectResult(Ty, freeIntRegs);
2978 case SSEUp:
2979 case X87Up:
2980 llvm_unreachable("Invalid classification for lo word.");
2982 // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
2983 // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
2984 // and %r9 is used.
2985 case Integer:
2986 ++neededInt;
2988 // Pick an 8-byte type based on the preferred type.
2989 ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);
2991 // If we have a sign or zero extended integer, make sure to return Extend
2992 // so that the parameter gets the right LLVM IR attributes.
2993 if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
2994 // Treat an enum type as its underlying type.
2995 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2996 Ty = EnumTy->getDecl()->getIntegerType();
2998 if (Ty->isIntegralOrEnumerationType() &&
2999 Ty->isPromotableIntegerType())
3000 return ABIArgInfo::getExtend();
3001 }
3002 break;
3005 // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
3006 // available SSE register is used, the registers are taken in the
3007 // order from %xmm0 to %xmm7.
3008 case SSE: {
3009 llvm::Type *IRType = CGT.ConvertType(Ty);
3010 ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
3011 ++neededSSE;
3012 break;
3013 }
3014 }
3016 llvm::Type *HighPart = nullptr;
3017 switch (Hi) {
3018 // Memory was handled previously, ComplexX87 and X87 should
3019 // never occur as hi classes, and X87Up must be preceded by X87,
3020 // which is passed in memory.
3021 case Memory:
3022 case X87:
3023 case ComplexX87:
3024 llvm_unreachable("Invalid classification for hi word.");
3026 case NoClass: break;
3028 case Integer:
3029 ++neededInt;
3030 // Pick an 8-byte type based on the preferred type.
3031 HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
3033 if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
3034 return ABIArgInfo::getDirect(HighPart, 8);
3035 break;
3037 // X87Up generally doesn't occur here (long double is passed in
3038 // memory), except in situations involving unions.
3039 case X87Up:
3040 case SSE:
3041 HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);
3043 if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
3044 return ABIArgInfo::getDirect(HighPart, 8);
3046 ++neededSSE;
3047 break;
3049 // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
3050 // eightbyte is passed in the upper half of the last used SSE
3051 // register. This only happens when 128-bit vectors are passed.
3052 case SSEUp:
3053 assert(Lo == SSE && "Unexpected SSEUp classification");
3054 ResType = GetByteVectorType(Ty);
3055 break;
3056 }
3058 // If a high part was specified, merge it together with the low part. It is
3059 // known to pass in the high eightbyte of the result. We do this by forming a
3060 // first class struct aggregate with the high and low part: {low, high}
3061 if (HighPart)
3062 ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());
3064 return ABIArgInfo::getDirect(ResType);
3067 void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
3069 if (!getCXXABI().classifyReturnType(FI))
3070 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
3072 // Keep track of the number of assigned registers.
3073 unsigned freeIntRegs = 6, freeSSERegs = 8;
3075 // If the return value is indirect, then the hidden argument is consuming one
3076 // integer register.
3077 if (FI.getReturnInfo().isIndirect())
3078 --freeIntRegs;
3080 // The chain argument effectively gives us another free register.
3081 if (FI.isChainCall())
3082 ++freeIntRegs;
3084 unsigned NumRequiredArgs = FI.getNumRequiredArgs();
3085 // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
3086 // get assigned (in left-to-right order) for passing as follows...
3087 unsigned ArgNo = 0;
3088 for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
3089 it != ie; ++it, ++ArgNo) {
3090 bool IsNamedArg = ArgNo < NumRequiredArgs;
3092 unsigned neededInt, neededSSE;
3093 it->info = classifyArgumentType(it->type, freeIntRegs, neededInt,
3094 neededSSE, IsNamedArg);
3096 // AMD64-ABI 3.2.3p3: If there are no registers available for any
3097 // eightbyte of an argument, the whole argument is passed on the
3098 // stack. If registers have already been assigned for some
3099 // eightbytes of such an argument, the assignments get reverted.
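// Example (editorial addition): if one GPR remains free and the next
// argument is a 16-byte struct classified (INTEGER, INTEGER), then
// neededInt == 2 exceeds freeIntRegs == 1, and the entire struct is
// demoted to the stack rather than split across register and memory.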
3100 if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) {
3101 freeIntRegs -= neededInt;
3102 freeSSERegs -= neededSSE;
3103 } else {
3104 it->info = getIndirectResult(it->type, freeIntRegs);
3109 static Address EmitX86_64VAArgFromMemory(CodeGenFunction &CGF,
3110 Address VAListAddr, QualType Ty) {
3111 Address overflow_arg_area_p = CGF.Builder.CreateStructGEP(
3112 VAListAddr, 2, CharUnits::fromQuantity(8), "overflow_arg_area_p");
3113 llvm::Value *overflow_arg_area =
3114 CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");
3116 // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
3117 // byte boundary if alignment needed by type exceeds 8 byte boundary.
3118 // It isn't stated explicitly in the standard, but in practice we use
3119 // alignment greater than 16 where necessary.
3120 CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
3121 if (Align > CharUnits::fromQuantity(8)) {
3122 overflow_arg_area = emitRoundPointerUpToAlignment(CGF, overflow_arg_area,
3123 Align);
3124 }
3126 // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
3127 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
3128 llvm::Value *Res =
3129 CGF.Builder.CreateBitCast(overflow_arg_area,
3130 llvm::PointerType::getUnqual(LTy));
3132 // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
3133 // l->overflow_arg_area + sizeof(type).
3134 // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
3135 // an 8 byte boundary.
3137 uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
3138 llvm::Value *Offset =
3139 llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
3140 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
3141 "overflow_arg_area.next");
3142 CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
3144 // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
3145 return Address(Res, Align);
3148 Address X86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
3149 QualType Ty) const {
3150 // Assume that va_list type is correct; should be pointer to LLVM type:
3151 // struct {
3152 //   i32 gp_offset;
3153 //   i32 fp_offset;
3154 //   i8* overflow_arg_area;
3155 //   i8* reg_save_area;
3156 // };
3157 unsigned neededInt, neededSSE;
3159 Ty = getContext().getCanonicalType(Ty);
3160 ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE,
3161 /*isNamedArg*/false);
3163 // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
3164 // in the registers. If not go to step 7.
3165 if (!neededInt && !neededSSE)
3166 return EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);
3168 // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
3169 // general purpose registers needed to pass type and num_fp to hold
3170 // the number of floating point registers needed.
3172 // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
3173 // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
3174 // l->fp_offset > 304 - num_fp * 16 go to step 7.
3176 // NOTE: 304 is a typo, there are (6 * 8 + 8 * 16) = 176 bytes of
3177 // register save space).
3179 llvm::Value *InRegs = nullptr;
3180 Address gp_offset_p = Address::invalid(), fp_offset_p = Address::invalid();
3181 llvm::Value *gp_offset = nullptr, *fp_offset = nullptr;
3182 if (neededInt) {
3183 gp_offset_p =
3184 CGF.Builder.CreateStructGEP(VAListAddr, 0, CharUnits::Zero(),
3185 "gp_offset_p");
3186 gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
3187 InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
3188 InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
3189 }
3191 if (neededSSE) {
3192 fp_offset_p =
3193 CGF.Builder.CreateStructGEP(VAListAddr, 1, CharUnits::fromQuantity(4),
3194 "fp_offset_p");
3195 fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
3196 llvm::Value *FitsInFP =
3197 llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
3198 FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
3199 InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
3202 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
3203 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
3204 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
3205 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
3207 // Emit code to load the value if it was passed in registers.
3209 CGF.EmitBlock(InRegBlock);
3211 // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
3212 // an offset of l->gp_offset and/or l->fp_offset. This may require
3213 // copying to a temporary location in case the parameter is passed
3214 // in different register classes or requires an alignment greater
3215 // than 8 for general purpose registers and 16 for XMM registers.
3217 // FIXME: This really results in shameful code when we end up needing to
3218 // collect arguments from different places; often what should result in a
3219 // simple assembling of a structure from scattered addresses has many more
3220 // loads than necessary. Can we clean this up?
3221 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
3222 llvm::Value *RegSaveArea = CGF.Builder.CreateLoad(
3223 CGF.Builder.CreateStructGEP(VAListAddr, 3, CharUnits::fromQuantity(16)),
3224 "reg_save_area");
3226 Address RegAddr = Address::invalid();
3227 if (neededInt && neededSSE) {
3229 assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
3230 llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
3231 Address Tmp = CGF.CreateMemTemp(Ty);
3232 Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST);
3233 assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
3234 llvm::Type *TyLo = ST->getElementType(0);
3235 llvm::Type *TyHi = ST->getElementType(1);
3236 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
3237 "Unexpected ABI info for mixed regs");
3238 llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
3239 llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
3240 llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegSaveArea, gp_offset);
3241 llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegSaveArea, fp_offset);
3242 llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
3243 llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
3245 // Copy the first element.
3246 llvm::Value *V =
3247 CGF.Builder.CreateDefaultAlignedLoad(
3248 CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
3249 CGF.Builder.CreateStore(V,
3250 CGF.Builder.CreateStructGEP(Tmp, 0, CharUnits::Zero()));
3252 // Copy the second element.
3253 V = CGF.Builder.CreateDefaultAlignedLoad(
3254 CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
3255 CharUnits Offset = CharUnits::fromQuantity(
3256 getDataLayout().getStructLayout(ST)->getElementOffset(1));
3257 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1, Offset));
3259 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
3260 } else if (neededInt) {
3261 RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, gp_offset),
3262 CharUnits::fromQuantity(8));
3263 RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);
3265 // Copy to a temporary if necessary to ensure the appropriate alignment.
3266 std::pair<CharUnits, CharUnits> SizeAlign =
3267 getContext().getTypeInfoInChars(Ty);
3268 uint64_t TySize = SizeAlign.first.getQuantity();
3269 CharUnits TyAlign = SizeAlign.second;
3271 // Copy into a temporary if the type is more aligned than the
3272 // register save area.
3273 if (TyAlign.getQuantity() > 8) {
3274 Address Tmp = CGF.CreateMemTemp(Ty);
3275 CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, false);
3276 RegAddr = Tmp;
3277 }
3279 } else if (neededSSE == 1) {
3280 RegAddr = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset),
3281 CharUnits::fromQuantity(16));
3282 RegAddr = CGF.Builder.CreateElementBitCast(RegAddr, LTy);
3283 } else {
3284 assert(neededSSE == 2 && "Invalid number of needed registers!");
3285 // SSE registers are spaced 16 bytes apart in the register save
3286 // area, we need to collect the two eightbytes together.
3287 // The ABI isn't explicit about this, but it seems reasonable
3288 // to assume that the slots are 16-byte aligned, since the stack is
3289 // naturally 16-byte aligned and the prologue is expected to store
3290 // all the SSE registers to the RSA.
3291 Address RegAddrLo = Address(CGF.Builder.CreateGEP(RegSaveArea, fp_offset),
3292 CharUnits::fromQuantity(16));
3293 Address RegAddrHi =
3294 CGF.Builder.CreateConstInBoundsByteGEP(RegAddrLo,
3295 CharUnits::fromQuantity(16));
3296 llvm::Type *DoubleTy = CGF.DoubleTy;
3297 llvm::StructType *ST = llvm::StructType::get(DoubleTy, DoubleTy, nullptr);
3298 llvm::Value *V;
3299 Address Tmp = CGF.CreateMemTemp(Ty);
3300 Tmp = CGF.Builder.CreateElementBitCast(Tmp, ST);
3301 V = CGF.Builder.CreateLoad(
3302 CGF.Builder.CreateElementBitCast(RegAddrLo, DoubleTy));
3303 CGF.Builder.CreateStore(V,
3304 CGF.Builder.CreateStructGEP(Tmp, 0, CharUnits::Zero()));
3305 V = CGF.Builder.CreateLoad(
3306 CGF.Builder.CreateElementBitCast(RegAddrHi, DoubleTy));
3307 CGF.Builder.CreateStore(V,
3308 CGF.Builder.CreateStructGEP(Tmp, 1, CharUnits::fromQuantity(8)));
3310 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, LTy);
3313 // AMD64-ABI 3.5.7p5: Step 5. Set:
3314 // l->gp_offset = l->gp_offset + num_gp * 8
3315 // l->fp_offset = l->fp_offset + num_fp * 16.
3316 if (neededInt) {
3317 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
3318 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
3319 gp_offset_p);
3320 }
3321 if (neededSSE) {
3322 llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
3323 CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
3324 fp_offset_p);
3325 }
3326 CGF.EmitBranch(ContBlock);
3328 // Emit code to load the value if it was passed in memory.
3330 CGF.EmitBlock(InMemBlock);
3331 Address MemAddr = EmitX86_64VAArgFromMemory(CGF, VAListAddr, Ty);
3333 // Return the appropriate result.
3335 CGF.EmitBlock(ContBlock);
3336 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock, MemAddr, InMemBlock,
3337 "vaarg.addr");
3338 return ResAddr;
3339 }
3341 Address X86_64ABIInfo::EmitMSVAArg(CodeGenFunction &CGF, Address VAListAddr,
3342 QualType Ty) const {
3343 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
3344 CGF.getContext().getTypeInfoInChars(Ty),
3345 CharUnits::fromQuantity(8),
3346 /*allowHigherAlign*/ false);
3349 ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, unsigned &FreeSSERegs,
3350 bool IsReturnType) const {
3352 if (Ty->isVoidType())
3353 return ABIArgInfo::getIgnore();
3355 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3356 Ty = EnumTy->getDecl()->getIntegerType();
3358 TypeInfo Info = getContext().getTypeInfo(Ty);
3359 uint64_t Width = Info.Width;
3360 CharUnits Align = getContext().toCharUnitsFromBits(Info.Align);
3362 const RecordType *RT = Ty->getAs<RecordType>();
3363 if (RT) {
3364 if (!IsReturnType) {
3365 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()))
3366 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
3367 }
3369 if (RT->getDecl()->hasFlexibleArrayMember())
3370 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
3374 // vectorcall adds the concept of a homogenous vector aggregate, similar to
3375 // other targets.
3376 const Type *Base = nullptr;
3377 uint64_t NumElts = 0;
3378 if (FreeSSERegs && isHomogeneousAggregate(Ty, Base, NumElts)) {
3379 if (FreeSSERegs >= NumElts) {
3380 FreeSSERegs -= NumElts;
3381 if (IsReturnType || Ty->isBuiltinType() || Ty->isVectorType())
3382 return ABIArgInfo::getDirect();
3383 return ABIArgInfo::getExpand();
3384 }
3385 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
3386 }
3389 if (Ty->isMemberPointerType()) {
3390 // If the member pointer is represented by an LLVM int or ptr, pass it
3391 // directly.
3392 llvm::Type *LLTy = CGT.ConvertType(Ty);
3393 if (LLTy->isPointerTy() || LLTy->isIntegerTy())
3394 return ABIArgInfo::getDirect();
3397 if (RT || Ty->isAnyComplexType() || Ty->isMemberPointerType()) {
3398 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
3399 // not 1, 2, 4, or 8 bytes, must be passed by reference."
3400 if (Width > 64 || !llvm::isPowerOf2_64(Width))
3401 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
3403 // Otherwise, coerce it to a small integer.
3404 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Width));
3405 }
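// Examples (editorial addition): a 3-byte struct is not 1, 2, 4, or 8
// bytes, so it is passed by reference, while struct { int a, b; } is
// exactly 8 bytes and is coerced to a single i64 above.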
3407 // Bool type is always extended to the ABI, other builtin types are not
3408 // extended.
3409 const BuiltinType *BT = Ty->getAs<BuiltinType>();
3410 if (BT && BT->getKind() == BuiltinType::Bool)
3411 return ABIArgInfo::getExtend();
3413 // Mingw64 GCC uses the old 80 bit extended precision floating point unit. It
3414 // passes long doubles indirectly through memory.
3415 if (IsMingw64 && BT && BT->getKind() == BuiltinType::LongDouble) {
3416 const llvm::fltSemantics *LDF = &getTarget().getLongDoubleFormat();
3417 if (LDF == &llvm::APFloat::x87DoubleExtended)
3418 return ABIArgInfo::getIndirect(Align, /*ByVal=*/false);
3421 return ABIArgInfo::getDirect();
3424 void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
3425 bool IsVectorCall =
3426 FI.getCallingConvention() == llvm::CallingConv::X86_VectorCall;
3428 // We can use up to 4 SSE return registers with vectorcall.
3429 unsigned FreeSSERegs = IsVectorCall ? 4 : 0;
3430 if (!getCXXABI().classifyReturnType(FI))
3431 FI.getReturnInfo() = classify(FI.getReturnType(), FreeSSERegs, true);
3433 // We can use up to 6 SSE register parameters with vectorcall.
3434 FreeSSERegs = IsVectorCall ? 6 : 0;
3435 for (auto &I : FI.arguments())
3436 I.info = classify(I.type, FreeSSERegs, false);
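
// A hedged example of the vectorcall accounting above (illustrative only):
// struct HVA2 { __m128 x, y; }; is a homogeneous vector aggregate needing
// two SSE registers, so three HVA2 arguments consume all six parameter
// registers and a fourth falls through to the indirect path.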
3439 Address WinX86_64ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
3440 QualType Ty) const {
3441 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
3442 CGF.getContext().getTypeInfoInChars(Ty),
3443 CharUnits::fromQuantity(8),
3444 /*allowHigherAlign*/ false);
3449 /// PPC32_SVR4_ABIInfo - The 32-bit PowerPC ELF (SVR4) ABI information.
3450 class PPC32_SVR4_ABIInfo : public DefaultABIInfo {
3451 bool IsSoftFloatABI;
3453 PPC32_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, bool SoftFloatABI)
3454 : DefaultABIInfo(CGT), IsSoftFloatABI(SoftFloatABI) {}
3456 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
3457 QualType Ty) const override;
3460 class PPC32TargetCodeGenInfo : public TargetCodeGenInfo {
3462 PPC32TargetCodeGenInfo(CodeGenTypes &CGT, bool SoftFloatABI)
3463 : TargetCodeGenInfo(new PPC32_SVR4_ABIInfo(CGT, SoftFloatABI)) {}
3465 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
3466 // This is recovered from gcc output.
3467 return 1; // r1 is the dedicated stack pointer
3470 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3471 llvm::Value *Address) const override;
3476 Address PPC32_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAList,
3477 QualType Ty) const {
3478 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
3479 // TODO: Implement this. For now ignore.
(void)CTy;
3481 return Address::invalid();
}
3484 // struct __va_list_tag {
3485 // unsigned char gpr;
3486 // unsigned char fpr;
3487 // unsigned short reserved;
3488 // void *overflow_arg_area;
3489 // void *reg_save_area;
// };
3492 bool isI64 = Ty->isIntegerType() && getContext().getTypeSize(Ty) == 64;
bool isInt =
3494 Ty->isIntegerType() || Ty->isPointerType() || Ty->isAggregateType();
3495 bool isF64 = Ty->isFloatingType() && getContext().getTypeSize(Ty) == 64;
3497 // All aggregates are passed indirectly? That doesn't seem consistent
3498 // with the argument-lowering code.
3499 bool isIndirect = Ty->isAggregateType();
3501 CGBuilderTy &Builder = CGF.Builder;
3503 // The calling convention either uses 1-2 GPRs or 1 FPR.
3504 Address NumRegsAddr = Address::invalid();
3505 if (isInt || IsSoftFloatABI) {
3506 NumRegsAddr = Builder.CreateStructGEP(VAList, 0, CharUnits::Zero(), "gpr");
} else {
3508 NumRegsAddr = Builder.CreateStructGEP(VAList, 1, CharUnits::One(), "fpr");
}
3511 llvm::Value *NumRegs = Builder.CreateLoad(NumRegsAddr, "numUsedRegs");
3513 // "Align" the register count when TY is i64.
3514 if (isI64 || (isF64 && IsSoftFloatABI)) {
3515 NumRegs = Builder.CreateAdd(NumRegs, Builder.getInt8(1));
3516 NumRegs = Builder.CreateAnd(NumRegs, Builder.getInt8((uint8_t) ~1U));
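
// For example (illustrative): if three GPRs are already used and the next
// argument is a 64-bit integer, rounding the count up to four skips one
// register so the value lands in an aligned register pair.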
llvm::Value *CC =
3520 Builder.CreateICmpULT(NumRegs, Builder.getInt8(8), "cond");
3522 llvm::BasicBlock *UsingRegs = CGF.createBasicBlock("using_regs");
3523 llvm::BasicBlock *UsingOverflow = CGF.createBasicBlock("using_overflow");
3524 llvm::BasicBlock *Cont = CGF.createBasicBlock("cont");
3526 Builder.CreateCondBr(CC, UsingRegs, UsingOverflow);
3528 llvm::Type *DirectTy = CGF.ConvertType(Ty);
3529 if (isIndirect) DirectTy = DirectTy->getPointerTo(0);
3531 // Case 1: consume registers.
3532 Address RegAddr = Address::invalid();
3534 CGF.EmitBlock(UsingRegs);
3536 Address RegSaveAreaPtr =
3537 Builder.CreateStructGEP(VAList, 4, CharUnits::fromQuantity(8));
3538 RegAddr = Address(Builder.CreateLoad(RegSaveAreaPtr),
3539 CharUnits::fromQuantity(8));
3540 assert(RegAddr.getElementType() == CGF.Int8Ty);
3542 // Floating-point registers start after the general-purpose registers.
3543 if (!(isInt || IsSoftFloatABI)) {
3544 RegAddr = Builder.CreateConstInBoundsByteGEP(RegAddr,
3545 CharUnits::fromQuantity(32));
3548 // Get the address of the saved value by scaling the number of
3549 // registers we've used by the size of each register.
3550 CharUnits RegSize = CharUnits::fromQuantity((isInt || IsSoftFloatABI) ? 4 : 8);
3551 llvm::Value *RegOffset =
3552 Builder.CreateMul(NumRegs, Builder.getInt8(RegSize.getQuantity()));
3553 RegAddr = Address(Builder.CreateInBoundsGEP(CGF.Int8Ty,
3554 RegAddr.getPointer(), RegOffset),
3555 RegAddr.getAlignment().alignmentOfArrayElement(RegSize));
3556 RegAddr = Builder.CreateElementBitCast(RegAddr, DirectTy);
3558 // Increase the used-register count.
NumRegs =
3560 Builder.CreateAdd(NumRegs,
3561 Builder.getInt8((isI64 || (isF64 && IsSoftFloatABI)) ? 2 : 1));
3562 Builder.CreateStore(NumRegs, NumRegsAddr);
3564 CGF.EmitBranch(Cont);
3567 // Case 2: consume space in the overflow area.
3568 Address MemAddr = Address::invalid();
3570 CGF.EmitBlock(UsingOverflow);
3572 // Everything in the overflow area is rounded up to a size of at least 4.
3573 CharUnits OverflowAreaAlign = CharUnits::fromQuantity(4);
CharUnits Size;
if (!isIndirect) {
3577 auto TypeInfo = CGF.getContext().getTypeInfoInChars(Ty);
3578 Size = TypeInfo.first.RoundUpToAlignment(OverflowAreaAlign);
} else {
3580 Size = CGF.getPointerSize();
}
3583 Address OverflowAreaAddr =
3584 Builder.CreateStructGEP(VAList, 3, CharUnits::fromQuantity(4));
3585 Address OverflowArea(Builder.CreateLoad(OverflowAreaAddr, "argp.cur"),
OverflowAreaAlign);
3587 // Round up address of argument to alignment
3588 CharUnits Align = CGF.getContext().getTypeAlignInChars(Ty);
3589 if (Align > OverflowAreaAlign) {
3590 llvm::Value *Ptr = OverflowArea.getPointer();
3591 OverflowArea = Address(emitRoundPointerUpToAlignment(CGF, Ptr, Align),
Align);
}
3595 MemAddr = Builder.CreateElementBitCast(OverflowArea, DirectTy);
3597 // Increase the overflow area.
3598 OverflowArea = Builder.CreateConstInBoundsByteGEP(OverflowArea, Size);
3599 Builder.CreateStore(OverflowArea.getPointer(), OverflowAreaAddr);
3600 CGF.EmitBranch(Cont);
3603 CGF.EmitBlock(Cont);
3605 // Merge the cases with a phi.
3606 Address Result = emitMergePHI(CGF, RegAddr, UsingRegs, MemAddr, UsingOverflow,
"vaarg.addr");
3609 // Load the pointer if the argument was passed indirectly.
if (isIndirect)
3611 Result = Address(Builder.CreateLoad(Result, "aggr"),
3612 getContext().getTypeAlignInChars(Ty));

return Result;
}
3619 PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3620 llvm::Value *Address) const {
3621 // This is calculated from the LLVM and GCC tables and verified
3622 // against gcc output. AFAIK all ABIs use the same encoding.
3624 CodeGen::CGBuilderTy &Builder = CGF.Builder;
3626 llvm::IntegerType *i8 = CGF.Int8Ty;
3627 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
3628 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
3629 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
3631 // 0-31: r0-31, the 4-byte general-purpose registers
3632 AssignToArrayRange(Builder, Address, Four8, 0, 31);
3634 // 32-63: fp0-31, the 8-byte floating-point registers
3635 AssignToArrayRange(Builder, Address, Eight8, 32, 63);
3637 // 64-76 are various 4-byte special-purpose registers:
// 64: mq
// 65: lr
// 66: ctr
// 67: ap
// 68-75: cr0-7
// 76: xer
3644 AssignToArrayRange(Builder, Address, Four8, 64, 76);
3646 // 77-108: v0-31, the 16-byte vector registers
3647 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
// 109: vrsave
// 110: vscr
// 111: spe_acc
// 112: spefscr
// 113: sfp
3654 AssignToArrayRange(Builder, Address, Four8, 109, 113);

return false;
}
3662 /// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
3663 class PPC64_SVR4_ABIInfo : public DefaultABIInfo {
public:
enum ABIKind {
ELFv1 = 0,
ELFv2
};

private:
3671 static const unsigned GPRBits = 64;
ABIKind Kind;
bool HasQPX;
3675 // A vector of float or double will be promoted to <4 x f32> or <4 x f64> and
3676 // will be passed in a QPX register.
3677 bool IsQPXVectorTy(const Type *Ty) const {
if (!HasQPX)
return false;

3681 if (const VectorType *VT = Ty->getAs<VectorType>()) {
3682 unsigned NumElements = VT->getNumElements();
3683 if (NumElements == 1)
return false;

3686 if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double)) {
3687 if (getContext().getTypeSize(Ty) <= 256)
return true;
3689 } else if (VT->getElementType()->
3690 isSpecificBuiltinType(BuiltinType::Float)) {
3691 if (getContext().getTypeSize(Ty) <= 128)
return true;
}
}

return false;
}
3699 bool IsQPXVectorTy(QualType Ty) const {
3700 return IsQPXVectorTy(Ty.getTypePtr());
3704 PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind, bool HasQPX)
3705 : DefaultABIInfo(CGT), Kind(Kind), HasQPX(HasQPX) {}
3707 bool isPromotableTypeForABI(QualType Ty) const;
3708 CharUnits getParamTypeAlignment(QualType Ty) const;
3710 ABIArgInfo classifyReturnType(QualType RetTy) const;
3711 ABIArgInfo classifyArgumentType(QualType Ty) const;
3713 bool isHomogeneousAggregateBaseType(QualType Ty) const override;
3714 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
3715 uint64_t Members) const override;
3717 // TODO: We can add more logic to computeInfo to improve performance.
3718 // Example: For aggregate arguments that fit in a register, we could
3719 // use getDirectInReg (as is done below for structs containing a single
3720 // floating-point value) to avoid pushing them to memory on function
3721 // entry. This would require changing the logic in PPCISelLowering
3722 // when lowering the parameters in the caller and args in the callee.
3723 void computeInfo(CGFunctionInfo &FI) const override {
3724 if (!getCXXABI().classifyReturnType(FI))
3725 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
3726 for (auto &I : FI.arguments()) {
3727 // We rely on the default argument classification for the most part.
3728 // One exception: An aggregate containing a single floating-point
3729 // or vector item must be passed in a register if one is available.
3730 const Type *T = isSingleElementStruct(I.type, getContext());
if (T) {
3732 const BuiltinType *BT = T->getAs<BuiltinType>();
3733 if (IsQPXVectorTy(T) ||
3734 (T->isVectorType() && getContext().getTypeSize(T) == 128) ||
3735 (BT && BT->isFloatingPoint())) {
QualType QT(T, 0);
3737 I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
continue;
}
}
3741 I.info = classifyArgumentType(I.type);
3745 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
3746 QualType Ty) const override;
3749 class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {
3752 PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT,
3753 PPC64_SVR4_ABIInfo::ABIKind Kind, bool HasQPX)
3754 : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT, Kind, HasQPX)) {}
3756 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
3757 // This is recovered from gcc output.
3758 return 1; // r1 is the dedicated stack pointer
3761 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3762 llvm::Value *Address) const override;
3765 class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
3767 PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
3769 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
3770 // This is recovered from gcc output.
3771 return 1; // r1 is the dedicated stack pointer
3774 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3775 llvm::Value *Address) const override;
3780 // Return true if the ABI requires Ty to be passed sign- or zero-
3781 // extended to 64 bits.
3783 PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
3784 // Treat an enum type as its underlying type.
3785 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
3786 Ty = EnumTy->getDecl()->getIntegerType();
3788 // Promotable integer types are required to be promoted by the ABI.
3789 if (Ty->isPromotableIntegerType())
return true;
3792 // In addition to the usual promotable integer types, we also need to
3793 // extend all 32-bit types, since the ABI requires promotion to 64 bits.
3794 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
3795 switch (BT->getKind()) {
3796 case BuiltinType::Int:
3797 case BuiltinType::UInt:
return true;
default:
break;
}

return false;
}
3806 /// getParamTypeAlignment - Determine the alignment a type requires in the
3807 /// parameter save area. Always returns at least 8 bytes.
3808 CharUnits PPC64_SVR4_ABIInfo::getParamTypeAlignment(QualType Ty) const {
3809 // Complex types are passed just like their elements.
3810 if (const ComplexType *CTy = Ty->getAs<ComplexType>())
3811 Ty = CTy->getElementType();
3813 // Only vector types of size 16 bytes need alignment (larger types are
3814 // passed via reference, smaller types are not aligned).
3815 if (IsQPXVectorTy(Ty)) {
3816 if (getContext().getTypeSize(Ty) > 128)
3817 return CharUnits::fromQuantity(32);
3819 return CharUnits::fromQuantity(16);
3820 } else if (Ty->isVectorType()) {
3821 return CharUnits::fromQuantity(getContext().getTypeSize(Ty) == 128 ? 16 : 8);
3824 // For single-element float/vector structs, we consider the whole type
3825 // to have the same alignment requirements as its single element.
3826 const Type *AlignAsType = nullptr;
3827 const Type *EltType = isSingleElementStruct(Ty, getContext());
3829 const BuiltinType *BT = EltType->getAs<BuiltinType>();
3830 if (IsQPXVectorTy(EltType) || (EltType->isVectorType() &&
3831 getContext().getTypeSize(EltType) == 128) ||
3832 (BT && BT->isFloatingPoint()))
3833 AlignAsType = EltType;
3836 // Likewise for ELFv2 homogeneous aggregates.
3837 const Type *Base = nullptr;
3838 uint64_t Members = 0;
3839 if (!AlignAsType && Kind == ELFv2 &&
3840 isAggregateTypeForABI(Ty) && isHomogeneousAggregate(Ty, Base, Members))
AlignAsType = Base;
3843 // With special case aggregates, only vector base types need alignment.
3844 if (AlignAsType && IsQPXVectorTy(AlignAsType)) {
3845 if (getContext().getTypeSize(AlignAsType) > 128)
3846 return CharUnits::fromQuantity(32);
3848 return CharUnits::fromQuantity(16);
3849 } else if (AlignAsType) {
3850 return CharUnits::fromQuantity(AlignAsType->isVectorType() ? 16 : 8);
3853 // Otherwise, we only need alignment for any aggregate type that
3854 // has an alignment requirement of >= 16 bytes.
3855 if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128) {
3856 if (HasQPX && getContext().getTypeAlign(Ty) >= 256)
3857 return CharUnits::fromQuantity(32);
3858 return CharUnits::fromQuantity(16);
3861 return CharUnits::fromQuantity(8);
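
// Illustrative results of the rules above (hedged, not from the original
// source): a 16-byte Altivec vector parameter gets a 16-byte slot; a
// single-element struct wrapping a double aligns like a double (8 bytes);
// an aggregate declared __attribute__((aligned(16))) gets 16.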
3864 /// isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous
3865 /// aggregate. Base is set to the base element type, and Members is set
3866 /// to the number of base elements.
3867 bool ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base,
3868 uint64_t &Members) const {
3869 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
3870 uint64_t NElements = AT->getSize().getZExtValue();
if (NElements == 0)
return false;
3873 if (!isHomogeneousAggregate(AT->getElementType(), Base, Members))
return false;
3875 Members *= NElements;
3876 } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
3877 const RecordDecl *RD = RT->getDecl();
3878 if (RD->hasFlexibleArrayMember())
return false;

Members = 0;
3883 // If this is a C++ record, check the bases first.
3884 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
3885 for (const auto &I : CXXRD->bases()) {
3886 // Ignore empty records.
3887 if (isEmptyRecord(getContext(), I.getType(), true))
continue;

3890 uint64_t FldMembers;
3891 if (!isHomogeneousAggregate(I.getType(), Base, FldMembers))
return false;

3894 Members += FldMembers;
}
}
3898 for (const auto *FD : RD->fields()) {
3899 // Ignore (non-zero arrays of) empty records.
3900 QualType FT = FD->getType();
3901 while (const ConstantArrayType *AT =
3902 getContext().getAsConstantArrayType(FT)) {
3903 if (AT->getSize().getZExtValue() == 0)
return false;
3905 FT = AT->getElementType();
}

3907 if (isEmptyRecord(getContext(), FT, true))
continue;
3910 // For compatibility with GCC, ignore empty bitfields in C++ mode.
3911 if (getContext().getLangOpts().CPlusPlus &&
3912 FD->isBitField() && FD->getBitWidthValue(getContext()) == 0)
continue;

3915 uint64_t FldMembers;
3916 if (!isHomogeneousAggregate(FD->getType(), Base, FldMembers))
return false;
3919 Members = (RD->isUnion() ?
3920 std::max(Members, FldMembers) : Members + FldMembers);
3926 // Ensure there is no padding.
3927 if (getContext().getTypeSize(Base) * Members !=
3928 getContext().getTypeSize(Ty))
return false;
} else {
Members = 1;

3932 if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
Members = 2;
3934 Ty = CT->getElementType();
}
3937 // Most ABIs only support float, double, and some vector type widths.
3938 if (!isHomogeneousAggregateBaseType(Ty))
return false;
3941 // The base type must be the same for all members. Types that
3942 // agree in both total size and mode (float vs. vector) are
3943 // treated as being equivalent here.
3944 const Type *TyPtr = Ty.getTypePtr();
if (!Base)
Base = TyPtr;

3948 if (Base->isVectorType() != TyPtr->isVectorType() ||
3949 getContext().getTypeSize(Base) != getContext().getTypeSize(TyPtr))
return false;
}
3952 return Members > 0 && isHomogeneousAggregateSmallEnough(Base, Members);
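
// A hedged example of what the walk above accepts (illustration only):
// struct Point { float x, y; }; struct Rect { Point tl, br; }; yields
// Base = float and Members = 4, while struct { float f; double d; } fails
// the same-base-type check.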
3955 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
3956 // Homogeneous aggregates for ELFv2 must have base types of float,
3957 // double, long double, or 128-bit vectors.
3958 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
3959 if (BT->getKind() == BuiltinType::Float ||
3960 BT->getKind() == BuiltinType::Double ||
3961 BT->getKind() == BuiltinType::LongDouble)
return true;
}
3964 if (const VectorType *VT = Ty->getAs<VectorType>()) {
3965 if (getContext().getTypeSize(VT) == 128 || IsQPXVectorTy(Ty))
return true;
}
return false;
}
3971 bool PPC64_SVR4_ABIInfo::isHomogeneousAggregateSmallEnough(
3972 const Type *Base, uint64_t Members) const {
3973 // Vector types require one register, floating point types require one
3974 // or two registers depending on their size.
uint32_t NumRegs =
3976 Base->isVectorType() ? 1 : (getContext().getTypeSize(Base) + 63) / 64;
3978 // Homogeneous Aggregates may occupy at most 8 registers.
3979 return Members * NumRegs <= 8;
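
// e.g. (illustrative): eight floats or eight doubles still fit, at one
// register each, but five 128-bit long doubles would need ten registers
// and fail the check above.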
3983 PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
3984 Ty = useFirstFieldIfTransparentUnion(Ty);
3986 if (Ty->isAnyComplexType())
3987 return ABIArgInfo::getDirect();
3989 // Non-Altivec vector types are passed in GPRs (smaller than 16 bytes)
3990 // or via reference (larger than 16 bytes).
3991 if (Ty->isVectorType() && !IsQPXVectorTy(Ty)) {
3992 uint64_t Size = getContext().getTypeSize(Ty);
if (Size > 128)
3994 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
3995 else if (Size < 128) {
3996 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
3997 return ABIArgInfo::getDirect(CoerceTy);
4001 if (isAggregateTypeForABI(Ty)) {
4002 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
4003 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
4005 uint64_t ABIAlign = getParamTypeAlignment(Ty).getQuantity();
4006 uint64_t TyAlign = getContext().getTypeAlignInChars(Ty).getQuantity();
4008 // ELFv2 homogeneous aggregates are passed as array types.
4009 const Type *Base = nullptr;
4010 uint64_t Members = 0;
4011 if (Kind == ELFv2 &&
4012 isHomogeneousAggregate(Ty, Base, Members)) {
4013 llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
4014 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
4015 return ABIArgInfo::getDirect(CoerceTy);
4018 // If an aggregate may end up fully in registers, we do not
4019 // use the ByVal method, but pass the aggregate as array.
4020 // This is usually beneficial since we avoid forcing the
4021 // back-end to store the argument to memory.
4022 uint64_t Bits = getContext().getTypeSize(Ty);
4023 if (Bits > 0 && Bits <= 8 * GPRBits) {
4024 llvm::Type *CoerceTy;
4026 // Types up to 8 bytes are passed as integer type (which will be
4027 // properly aligned in the argument save area doubleword).
4028 if (Bits <= GPRBits)
4029 CoerceTy = llvm::IntegerType::get(getVMContext(),
4030 llvm::RoundUpToAlignment(Bits, 8));
4031 // Larger types are passed as arrays, with the base type selected
4032 // according to the required alignment in the save area.
else {
4034 uint64_t RegBits = ABIAlign * 8;
4035 uint64_t NumRegs = llvm::RoundUpToAlignment(Bits, RegBits) / RegBits;
4036 llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits);
4037 CoerceTy = llvm::ArrayType::get(RegTy, NumRegs);
}
4040 return ABIArgInfo::getDirect(CoerceTy);
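
// Worked example of the coercion above (a sketch, not normative): a
// 24-byte struct with 8-byte ABI alignment has Bits = 192 > GPRBits, so
// RegBits = 64 and it is passed as [3 x i64], i.e. in three GPRs.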
4043 // All other aggregates are passed ByVal.
4044 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
/*ByVal=*/true,
4046 /*Realign=*/TyAlign > ABIAlign);
}
4049 return (isPromotableTypeForABI(Ty) ?
4050 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
4054 PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
4055 if (RetTy->isVoidType())
4056 return ABIArgInfo::getIgnore();
4058 if (RetTy->isAnyComplexType())
4059 return ABIArgInfo::getDirect();
4061 // Non-Altivec vector types are returned in GPRs (smaller than 16 bytes)
4062 // or via reference (larger than 16 bytes).
4063 if (RetTy->isVectorType() && !IsQPXVectorTy(RetTy)) {
4064 uint64_t Size = getContext().getTypeSize(RetTy);
if (Size > 128)
4066 return getNaturalAlignIndirect(RetTy);
4067 else if (Size < 128) {
4068 llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
4069 return ABIArgInfo::getDirect(CoerceTy);
4073 if (isAggregateTypeForABI(RetTy)) {
4074 // ELFv2 homogeneous aggregates are returned as array types.
4075 const Type *Base = nullptr;
4076 uint64_t Members = 0;
4077 if (Kind == ELFv2 &&
4078 isHomogeneousAggregate(RetTy, Base, Members)) {
4079 llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
4080 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
4081 return ABIArgInfo::getDirect(CoerceTy);
4084 // ELFv2 small aggregates are returned in up to two registers.
4085 uint64_t Bits = getContext().getTypeSize(RetTy);
4086 if (Kind == ELFv2 && Bits <= 2 * GPRBits) {
if (Bits == 0)
4088 return ABIArgInfo::getIgnore();
4090 llvm::Type *CoerceTy;
4091 if (Bits > GPRBits) {
4092 CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits);
4093 CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy, nullptr);
} else
4095 CoerceTy = llvm::IntegerType::get(getVMContext(),
4096 llvm::RoundUpToAlignment(Bits, 8));
4097 return ABIArgInfo::getDirect(CoerceTy);
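
// e.g. (illustrative): struct { int a, b, c; } is 96 bits, so it is
// returned as { i64, i64 } in two GPRs, while struct { char c; } comes
// back as a single i8.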
4100 // All other aggregates are returned indirectly.
4101 return getNaturalAlignIndirect(RetTy);
4104 return (isPromotableTypeForABI(RetTy) ?
4105 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
4108 // Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine.
4109 Address PPC64_SVR4_ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4110 QualType Ty) const {
4111 auto TypeInfo = getContext().getTypeInfoInChars(Ty);
4112 TypeInfo.second = getParamTypeAlignment(Ty);
4114 CharUnits SlotSize = CharUnits::fromQuantity(8);
4116 // If we have a complex type and the base type is smaller than 8 bytes,
4117 // the ABI calls for the real and imaginary parts to be right-adjusted
4118 // in separate doublewords. However, Clang expects us to produce a
4119 // pointer to a structure with the two parts packed tightly. So generate
4120 // loads of the real and imaginary parts relative to the va_list pointer,
4121 // and store them to a temporary structure.
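// For instance (hedged): for a big-endian "float _Complex", the two 4-byte
// parts sit right-adjusted at offsets 4 and 12 of their doublewords, and
// the code below repacks them tightly.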
4122 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
4123 CharUnits EltSize = TypeInfo.first / 2;
4124 if (EltSize < SlotSize) {
4125 Address Addr = emitVoidPtrDirectVAArg(CGF, VAListAddr, CGF.Int8Ty,
4126 SlotSize * 2, SlotSize,
4127 SlotSize, /*AllowHigher*/ true);
4129 Address RealAddr = Addr;
4130 Address ImagAddr = RealAddr;
4131 if (CGF.CGM.getDataLayout().isBigEndian()) {
4132 RealAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr,
4133 SlotSize - EltSize);
4134 ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(ImagAddr,
4135 2 * SlotSize - EltSize);
} else {
4137 ImagAddr = CGF.Builder.CreateConstInBoundsByteGEP(RealAddr, SlotSize);
}
4140 llvm::Type *EltTy = CGF.ConvertTypeForMem(CTy->getElementType());
4141 RealAddr = CGF.Builder.CreateElementBitCast(RealAddr, EltTy);
4142 ImagAddr = CGF.Builder.CreateElementBitCast(ImagAddr, EltTy);
4143 llvm::Value *Real = CGF.Builder.CreateLoad(RealAddr, ".vareal");
4144 llvm::Value *Imag = CGF.Builder.CreateLoad(ImagAddr, ".vaimag");
4146 Address Temp = CGF.CreateMemTemp(Ty, "vacplx");
4147 CGF.EmitStoreOfComplex({Real, Imag}, CGF.MakeAddrLValue(Temp, Ty),
/*isInit*/ true);
return Temp;
}
}
4153 // Otherwise, just use the general rule.
4154 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*Indirect*/ false,
4155 TypeInfo, SlotSize, /*AllowHigher*/ true);
4159 PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4160 llvm::Value *Address) {
4161 // This is calculated from the LLVM and GCC tables and verified
4162 // against gcc output. AFAIK all ABIs use the same encoding.
4164 CodeGen::CGBuilderTy &Builder = CGF.Builder;
4166 llvm::IntegerType *i8 = CGF.Int8Ty;
4167 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
4168 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
4169 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
4171 // 0-31: r0-31, the 8-byte general-purpose registers
4172 AssignToArrayRange(Builder, Address, Eight8, 0, 31);
4174 // 32-63: fp0-31, the 8-byte floating-point registers
4175 AssignToArrayRange(Builder, Address, Eight8, 32, 63);
4177 // 64-76 are various 4-byte special-purpose registers:
// 64: mq
// 65: lr
// 66: ctr
// 67: ap
// 68-75: cr0-7
// 76: xer
4184 AssignToArrayRange(Builder, Address, Four8, 64, 76);
4186 // 77-108: v0-31, the 16-byte vector registers
4187 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
// 109: vrsave
// 110: vscr
// 111: spe_acc
// 112: spefscr
// 113: sfp
4194 AssignToArrayRange(Builder, Address, Four8, 109, 113);

return false;
}
4200 PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
4201 CodeGen::CodeGenFunction &CGF,
4202 llvm::Value *Address) const {
4204 return PPC64_initDwarfEHRegSizeTable(CGF, Address);
4208 PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4209 llvm::Value *Address) const {
4211 return PPC64_initDwarfEHRegSizeTable(CGF, Address);
4214 //===----------------------------------------------------------------------===//
4215 // AArch64 ABI Implementation
4216 //===----------------------------------------------------------------------===//
4220 class AArch64ABIInfo : public ABIInfo {
public:
enum ABIKind {
AAPCS = 0,
DarwinPCS
};

private:
ABIKind Kind;

public:
4231 AArch64ABIInfo(CodeGenTypes &CGT, ABIKind Kind) : ABIInfo(CGT), Kind(Kind) {}
4234 ABIKind getABIKind() const { return Kind; }
4235 bool isDarwinPCS() const { return Kind == DarwinPCS; }
4237 ABIArgInfo classifyReturnType(QualType RetTy) const;
4238 ABIArgInfo classifyArgumentType(QualType RetTy) const;
4239 bool isHomogeneousAggregateBaseType(QualType Ty) const override;
4240 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
4241 uint64_t Members) const override;
4243 bool isIllegalVectorType(QualType Ty) const;
4245 void computeInfo(CGFunctionInfo &FI) const override {
4246 if (!getCXXABI().classifyReturnType(FI))
4247 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
4249 for (auto &it : FI.arguments())
4250 it.info = classifyArgumentType(it.type);
4253 Address EmitDarwinVAArg(Address VAListAddr, QualType Ty,
4254 CodeGenFunction &CGF) const;
4256 Address EmitAAPCSVAArg(Address VAListAddr, QualType Ty,
4257 CodeGenFunction &CGF) const;
4259 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4260 QualType Ty) const override {
4261 return isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
4262 : EmitAAPCSVAArg(VAListAddr, Ty, CGF);
4266 class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
4268 AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind Kind)
4269 : TargetCodeGenInfo(new AArch64ABIInfo(CGT, Kind)) {}
4271 StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
4272 return "mov\tfp, fp\t\t; marker for objc_retainAutoreleaseReturnValue";
4275 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
return 31;
}
4279 bool doesReturnSlotInterfereWithArgs() const override { return false; }
4283 ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty) const {
4284 Ty = useFirstFieldIfTransparentUnion(Ty);
4286 // Handle illegal vector types here.
4287 if (isIllegalVectorType(Ty)) {
4288 uint64_t Size = getContext().getTypeSize(Ty);
if (Size <= 32) {
4290 llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
4291 return ABIArgInfo::getDirect(ResType);
}
if (Size == 64) {
4294 llvm::Type *ResType =
4295 llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
4296 return ABIArgInfo::getDirect(ResType);
}
if (Size == 128) {
4299 llvm::Type *ResType =
4300 llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
4301 return ABIArgInfo::getDirect(ResType);
}
4303 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}
4306 if (!isAggregateTypeForABI(Ty)) {
4307 // Treat an enum type as its underlying type.
4308 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
4309 Ty = EnumTy->getDecl()->getIntegerType();
4311 return (Ty->isPromotableIntegerType() && isDarwinPCS()
4312 ? ABIArgInfo::getExtend()
4313 : ABIArgInfo::getDirect());
4316 // Structures with either a non-trivial destructor or a non-trivial
4317 // copy constructor are always indirect.
4318 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
4319 return getNaturalAlignIndirect(Ty, /*ByVal=*/RAA ==
4320 CGCXXABI::RAA_DirectInMemory);
4323 // Empty records are always ignored on Darwin, but are passed in C++ mode
4324 // elsewhere, for GNU compatibility.
4325 if (isEmptyRecord(getContext(), Ty, true)) {
4326 if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
4327 return ABIArgInfo::getIgnore();
4329 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
4332 // Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
4333 const Type *Base = nullptr;
4334 uint64_t Members = 0;
4335 if (isHomogeneousAggregate(Ty, Base, Members)) {
4336 return ABIArgInfo::getDirect(
4337 llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members));
4340 // Aggregates <= 16 bytes are passed directly in registers or on the stack.
4341 uint64_t Size = getContext().getTypeSize(Ty);
if (Size <= 128) {
4343 unsigned Alignment = getContext().getTypeAlign(Ty);
4344 Size = 64 * ((Size + 63) / 64); // round up to multiple of 8 bytes
4346 // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
4347 // For aggregates with 16-byte alignment, we use i128.
4348 if (Alignment < 128 && Size == 128) {
4349 llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
4350 return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
4352 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
}
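
// Illustrative cases for the rounding above (hedged): a 3-char struct
// (24 bits) is rounded up and passed as i64; a 16-byte struct with 8-byte
// alignment becomes [2 x i64]; one declared alignas(16) becomes i128.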
4355 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
4358 ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy) const {
4359 if (RetTy->isVoidType())
4360 return ABIArgInfo::getIgnore();
4362 // Large vector types should be returned via memory.
4363 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
4364 return getNaturalAlignIndirect(RetTy);
4366 if (!isAggregateTypeForABI(RetTy)) {
4367 // Treat an enum type as its underlying type.
4368 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
4369 RetTy = EnumTy->getDecl()->getIntegerType();
4371 return (RetTy->isPromotableIntegerType() && isDarwinPCS()
4372 ? ABIArgInfo::getExtend()
4373 : ABIArgInfo::getDirect());
4376 if (isEmptyRecord(getContext(), RetTy, true))
4377 return ABIArgInfo::getIgnore();
4379 const Type *Base = nullptr;
4380 uint64_t Members = 0;
4381 if (isHomogeneousAggregate(RetTy, Base, Members))
4382 // Homogeneous Floating-point Aggregates (HFAs) are returned directly.
4383 return ABIArgInfo::getDirect();
4385 // Aggregates <= 16 bytes are returned directly in registers or on the stack.
4386 uint64_t Size = getContext().getTypeSize(RetTy);
if (Size <= 128) {
4388 unsigned Alignment = getContext().getTypeAlign(RetTy);
4389 Size = 64 * ((Size + 63) / 64); // round up to multiple of 8 bytes
4391 // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
4392 // For aggregates with 16-byte alignment, we use i128.
4393 if (Alignment < 128 && Size == 128) {
4394 llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
4395 return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
4397 return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
}
4400 return getNaturalAlignIndirect(RetTy);
4403 /// isIllegalVectorType - check whether the vector type is legal for AArch64.
4404 bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const {
4405 if (const VectorType *VT = Ty->getAs<VectorType>()) {
4406 // Check whether VT is legal.
4407 unsigned NumElements = VT->getNumElements();
4408 uint64_t Size = getContext().getTypeSize(VT);
4409 // NumElements should be power of 2 between 1 and 16.
4410 if ((NumElements & (NumElements - 1)) != 0 || NumElements > 16)
return true;
4412 return Size != 64 && (Size != 128 || NumElements == 1);
}
return false;
}
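
// e.g. (illustration): <8 x i8> (64 bits) and <4 x i32> (128 bits) are
// legal; <3 x float> (non-power-of-2 elements) and <2 x i8> (16 bits) are
// illegal and get coerced or passed indirectly by the classification code
// above.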
4417 bool AArch64ABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
4418 // Homogeneous aggregates for AAPCS64 must have base types of a floating
4419 // point type or a short-vector type. This is the same as the 32-bit ABI,
4420 // but with the difference that any floating-point type is allowed,
4421 // including __fp16.
4422 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
4423 if (BT->isFloatingPoint())
return true;
4425 } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
4426 unsigned VecSize = getContext().getTypeSize(VT);
4427 if (VecSize == 64 || VecSize == 128)
return true;
}
return false;
}
4433 bool AArch64ABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
4434 uint64_t Members) const {
4435 return Members <= 4;
4438 Address AArch64ABIInfo::EmitAAPCSVAArg(Address VAListAddr,
QualType Ty,
4440 CodeGenFunction &CGF) const {
4441 ABIArgInfo AI = classifyArgumentType(Ty);
4442 bool IsIndirect = AI.isIndirect();
4444 llvm::Type *BaseTy = CGF.ConvertType(Ty);
if (IsIndirect)
4446 BaseTy = llvm::PointerType::getUnqual(BaseTy);
4447 else if (AI.getCoerceToType())
4448 BaseTy = AI.getCoerceToType();
4450 unsigned NumRegs = 1;
4451 if (llvm::ArrayType *ArrTy = dyn_cast<llvm::ArrayType>(BaseTy)) {
4452 BaseTy = ArrTy->getElementType();
4453 NumRegs = ArrTy->getNumElements();
4455 bool IsFPR = BaseTy->isFloatingPointTy() || BaseTy->isVectorTy();
4457 // The AArch64 va_list type and handling is specified in the Procedure Call
4458 // Standard, section B.4:
//
// struct {
//   void *__stack;
//   void *__gr_top;
//   void *__vr_top;
//   int __gr_offs;
//   int __vr_offs;
// };
4468 llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
4469 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
4470 llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
4471 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
4473 auto TyInfo = getContext().getTypeInfoInChars(Ty);
4474 CharUnits TyAlign = TyInfo.second;
4476 Address reg_offs_p = Address::invalid();
4477 llvm::Value *reg_offs = nullptr;
int reg_top_index;
4479 CharUnits reg_top_offset;
4480 int RegSize = IsIndirect ? 8 : TyInfo.first.getQuantity();
if (!IsFPR) {
4482 // 3 is the field number of __gr_offs.
reg_offs_p =
4484 CGF.Builder.CreateStructGEP(VAListAddr, 3, CharUnits::fromQuantity(24),
"gr_offs_p");
4486 reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
4487 reg_top_index = 1; // field number for __gr_top
4488 reg_top_offset = CharUnits::fromQuantity(8);
4489 RegSize = llvm::RoundUpToAlignment(RegSize, 8);
} else {
4491 // 4 is the field number of __vr_offs.
reg_offs_p =
4493 CGF.Builder.CreateStructGEP(VAListAddr, 4, CharUnits::fromQuantity(28),
"vr_offs_p");
4495 reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
4496 reg_top_index = 2; // field number for __vr_top
4497 reg_top_offset = CharUnits::fromQuantity(16);
4498 RegSize = 16 * NumRegs;
}
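
// A hedged walk-through of the bookkeeping above: __gr_offs/__vr_offs hold
// negative byte offsets from __gr_top/__vr_top while register-saved
// arguments remain (e.g. -56 means seven 8-byte GPR slots are still
// unread), and reach zero once the register save area is exhausted.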
4501 //=======================================
4502 // Find out where argument was passed
4503 //=======================================
4505 // If reg_offs >= 0 we're already using the stack for this type of
4506 // argument. We don't want to keep updating reg_offs (in case it overflows,
4507 // though anyone passing 2GB of arguments, each at most 16 bytes, deserves
4508 // whatever they get).
4509 llvm::Value *UsingStack = nullptr;
4510 UsingStack = CGF.Builder.CreateICmpSGE(
4511 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));
4513 CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);
4515 // Otherwise, at least some kind of argument could go in these registers, the
4516 // question is whether this particular type is too big.
4517 CGF.EmitBlock(MaybeRegBlock);
4519 // Integer arguments may need to correct register alignment (for example a
4520 // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we
4521 // align __gr_offs to calculate the potential address.
4522 if (!IsFPR && !IsIndirect && TyAlign.getQuantity() > 8) {
4523 int Align = TyAlign.getQuantity();
4525 reg_offs = CGF.Builder.CreateAdd(
4526 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
"align_regoffs");
4528 reg_offs = CGF.Builder.CreateAnd(
4529 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align),
"aligned_regoffs");
}
4533 // Update the gr_offs/vr_offs pointer for next call to va_arg on this va_list.
4534 // The fact that this is done unconditionally reflects the fact that
4535 // allocating an argument to the stack also uses up all the remaining
4536 // registers of the appropriate kind.
4537 llvm::Value *NewOffset = nullptr;
4538 NewOffset = CGF.Builder.CreateAdd(
4539 reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
4540 CGF.Builder.CreateStore(NewOffset, reg_offs_p);
4542 // Now we're in a position to decide whether this argument really was in
4543 // registers or not.
4544 llvm::Value *InRegs = nullptr;
4545 InRegs = CGF.Builder.CreateICmpSLE(
4546 NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");
4548 CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);
4550 //=======================================
4551 // Argument was in registers
4552 //=======================================
4554 // Now we emit the code for if the argument was originally passed in
4555 // registers. First start the appropriate block:
4556 CGF.EmitBlock(InRegBlock);
4558 llvm::Value *reg_top = nullptr;
4559 Address reg_top_p = CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index,
4560 reg_top_offset, "reg_top_p");
4561 reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
4562 Address BaseAddr(CGF.Builder.CreateInBoundsGEP(reg_top, reg_offs),
4563 CharUnits::fromQuantity(IsFPR ? 16 : 8));
4564 Address RegAddr = Address::invalid();
4565 llvm::Type *MemTy = CGF.ConvertTypeForMem(Ty);
if (IsIndirect) {
4568 // If it's been passed indirectly (actually a struct), whatever we find from
4569 // stored registers or on the stack will actually be a struct **.
4570 MemTy = llvm::PointerType::getUnqual(MemTy);
}
4573 const Type *Base = nullptr;
4574 uint64_t NumMembers = 0;
4575 bool IsHFA = isHomogeneousAggregate(Ty, Base, NumMembers);
4576 if (IsHFA && NumMembers > 1) {
4577 // Homogeneous aggregates passed in registers will have their elements split
4578 // and stored 16-bytes apart regardless of size (they're notionally in qN,
4579 // qN+1, ...). We reload the elements and store them contiguously into a
// temporary local variable.
4581 assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
4582 auto BaseTyInfo = getContext().getTypeInfoInChars(QualType(Base, 0));
4583 llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
4584 llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
4585 Address Tmp = CGF.CreateTempAlloca(HFATy,
4586 std::max(TyAlign, BaseTyInfo.second));
4588 // On big-endian platforms, the value will be right-aligned in its slot.
int Offset = 0;
4590 if (CGF.CGM.getDataLayout().isBigEndian() &&
4591 BaseTyInfo.first.getQuantity() < 16)
4592 Offset = 16 - BaseTyInfo.first.getQuantity();
4594 for (unsigned i = 0; i < NumMembers; ++i) {
4595 CharUnits BaseOffset = CharUnits::fromQuantity(16 * i + Offset);
Address LoadAddr =
4597 CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, BaseOffset);
4598 LoadAddr = CGF.Builder.CreateElementBitCast(LoadAddr, BaseTy);
Address StoreAddr =
4601 CGF.Builder.CreateConstArrayGEP(Tmp, i, BaseTyInfo.first);
4603 llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
4604 CGF.Builder.CreateStore(Elem, StoreAddr);
4607 RegAddr = CGF.Builder.CreateElementBitCast(Tmp, MemTy);
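
// e.g. (illustrative): for an HFA of four floats on a big-endian target,
// the 4-byte elements are loaded from offsets 12, 28, 44 and 60 of the
// register save area and stored contiguously into Tmp.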
} else {
4609 // Otherwise the object is contiguous in memory.
4611 // It might be right-aligned in its slot.
4612 CharUnits SlotSize = BaseAddr.getAlignment();
4613 if (CGF.CGM.getDataLayout().isBigEndian() && !IsIndirect &&
4614 (IsHFA || !isAggregateTypeForABI(Ty)) &&
4615 TyInfo.first < SlotSize) {
4616 CharUnits Offset = SlotSize - TyInfo.first;
4617 BaseAddr = CGF.Builder.CreateConstInBoundsByteGEP(BaseAddr, Offset);
4620 RegAddr = CGF.Builder.CreateElementBitCast(BaseAddr, MemTy);
}
4623 CGF.EmitBranch(ContBlock);
4625 //=======================================
4626 // Argument was on the stack
4627 //=======================================
4628 CGF.EmitBlock(OnStackBlock);
4630 Address stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0,
4631 CharUnits::Zero(), "stack_p");
4632 llvm::Value *OnStackPtr = CGF.Builder.CreateLoad(stack_p, "stack");
4634 // Again, stack arguments may need realignment. In this case both integer and
4635 // floating-point ones might be affected.
4636 if (!IsIndirect && TyAlign.getQuantity() > 8) {
4637 int Align = TyAlign.getQuantity();
4639 OnStackPtr = CGF.Builder.CreatePtrToInt(OnStackPtr, CGF.Int64Ty);
4641 OnStackPtr = CGF.Builder.CreateAdd(
4642 OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
"align_stack");
4644 OnStackPtr = CGF.Builder.CreateAnd(
4645 OnStackPtr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),
"align_stack");
}
4648 OnStackPtr = CGF.Builder.CreateIntToPtr(OnStackPtr, CGF.Int8PtrTy);
4650 Address OnStackAddr(OnStackPtr,
4651 std::max(CharUnits::fromQuantity(8), TyAlign));
4653 // All stack slots are multiples of 8 bytes.
4654 CharUnits StackSlotSize = CharUnits::fromQuantity(8);
4655 CharUnits StackSize;
if (IsIndirect)
4657 StackSize = StackSlotSize;
else
4659 StackSize = TyInfo.first.RoundUpToAlignment(StackSlotSize);
4661 llvm::Value *StackSizeC = CGF.Builder.getSize(StackSize);
4662 llvm::Value *NewStack =
4663 CGF.Builder.CreateInBoundsGEP(OnStackPtr, StackSizeC, "new_stack");
4665 // Write the new value of __stack for the next call to va_arg
4666 CGF.Builder.CreateStore(NewStack, stack_p);
4668 if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
4669 TyInfo.first < StackSlotSize) {
4670 CharUnits Offset = StackSlotSize - TyInfo.first;
4671 OnStackAddr = CGF.Builder.CreateConstInBoundsByteGEP(OnStackAddr, Offset);
4674 OnStackAddr = CGF.Builder.CreateElementBitCast(OnStackAddr, MemTy);
4676 CGF.EmitBranch(ContBlock);
4678 //=======================================
// Tidy up
4680 //=======================================
4681 CGF.EmitBlock(ContBlock);
4683 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock,
4684 OnStackAddr, OnStackBlock, "vaargs.addr");
if (IsIndirect)
4687 return Address(CGF.Builder.CreateLoad(ResAddr, "vaarg.addr"),
TyInfo.second);

return ResAddr;
}
4693 Address AArch64ABIInfo::EmitDarwinVAArg(Address VAListAddr, QualType Ty,
4694 CodeGenFunction &CGF) const {
4695 // The backend's lowering doesn't support va_arg for aggregates or
4696 // illegal vector types. Lower VAArg here for these cases and use
4697 // the LLVM va_arg instruction for everything else.
4698 if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty))
4699 return Address::invalid();
4701 CharUnits SlotSize = CharUnits::fromQuantity(8);
4703 // Empty records are ignored for parameter passing purposes.
4704 if (isEmptyRecord(getContext(), Ty, true)) {
4705 Address Addr(CGF.Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize);
4706 Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
return Addr;
}
4710 // The size of the actual thing passed, which might end up just
4711 // being a pointer for indirect types.
4712 auto TyInfo = getContext().getTypeInfoInChars(Ty);
4714 // Arguments bigger than 16 bytes which aren't homogeneous
4715 // aggregates should be passed indirectly.
4716 bool IsIndirect = false;
4717 if (TyInfo.first.getQuantity() > 16) {
4718 const Type *Base = nullptr;
4719 uint64_t Members = 0;
4720 IsIndirect = !isHomogeneousAggregate(Ty, Base, Members);
4723 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect,
4724 TyInfo, SlotSize, /*AllowHigherAlign*/ true);
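
// e.g. (hedged): a 32-byte struct of four doubles is an HFA and is read
// directly from the va_list stack area, while a 32-byte struct of mixed
// fields is passed as a pointer and loaded indirectly.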
4727 //===----------------------------------------------------------------------===//
4728 // ARM ABI Implementation
4729 //===----------------------------------------------------------------------===//
4733 class ARMABIInfo : public ABIInfo {
public:
enum ABIKind {
APCS = 0,
AAPCS = 1,
AAPCS_VFP = 2,
AAPCS16_VFP = 3,
};

private:
ABIKind Kind;

public:
4746 ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind) {
setCCs();
}
4750 bool isEABI() const {
4751 switch (getTarget().getTriple().getEnvironment()) {
4752 case llvm::Triple::Android:
4753 case llvm::Triple::EABI:
4754 case llvm::Triple::EABIHF:
4755 case llvm::Triple::GNUEABI:
4756 case llvm::Triple::GNUEABIHF:
return true;
default:
return false;
}
}
4763 bool isEABIHF() const {
4764 switch (getTarget().getTriple().getEnvironment()) {
4765 case llvm::Triple::EABIHF:
4766 case llvm::Triple::GNUEABIHF:
return true;
default:
return false;
}
}
4773 bool isAndroid() const {
4774 return (getTarget().getTriple().getEnvironment() ==
4775 llvm::Triple::Android);
4778 ABIKind getABIKind() const { return Kind; }
4781 ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic) const;
4782 ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic) const;
4783 bool isIllegalVectorType(QualType Ty) const;
4785 bool isHomogeneousAggregateBaseType(QualType Ty) const override;
4786 bool isHomogeneousAggregateSmallEnough(const Type *Ty,
4787 uint64_t Members) const override;
4789 void computeInfo(CGFunctionInfo &FI) const override;
4791 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
4792 QualType Ty) const override;
4794 llvm::CallingConv::ID getLLVMDefaultCC() const;
4795 llvm::CallingConv::ID getABIDefaultCC() const;
4799 class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
4801 ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
4802 :TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {}
4804 const ARMABIInfo &getABIInfo() const {
4805 return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo());
4808 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
return 13;
}
4812 StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
4813 return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue";
4816 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
4817 llvm::Value *Address) const override {
4818 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
4820 // 0-15 are the 16 integer registers.
4821 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15);
return false;
}
4825 unsigned getSizeOfUnwindException() const override {
4826 if (getABIInfo().isEABI()) return 88;
4827 return TargetCodeGenInfo::getSizeOfUnwindException();
4830 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
4831 CodeGen::CodeGenModule &CGM) const override {
4832 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
if (!FD)
return;

4836 const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>();
if (!Attr)
return;

const char *Kind;
4841 switch (Attr->getInterrupt()) {
4842 case ARMInterruptAttr::Generic: Kind = ""; break;
4843 case ARMInterruptAttr::IRQ: Kind = "IRQ"; break;
4844 case ARMInterruptAttr::FIQ: Kind = "FIQ"; break;
4845 case ARMInterruptAttr::SWI: Kind = "SWI"; break;
4846 case ARMInterruptAttr::ABORT: Kind = "ABORT"; break;
4847 case ARMInterruptAttr::UNDEF: Kind = "UNDEF"; break;
4850 llvm::Function *Fn = cast<llvm::Function>(GV);
4852 Fn->addFnAttr("interrupt", Kind);
4854 ARMABIInfo::ABIKind ABI = cast<ARMABIInfo>(getABIInfo()).getABIKind();
4855 if (ABI == ARMABIInfo::APCS)
return;
4858 // AAPCS guarantees that sp will be 8-byte aligned on any public interface,
4859 // however this is not necessarily true on taking any interrupt. Instruct
4860 // the backend to perform a realignment as part of the function prologue.
4861 llvm::AttrBuilder B;
4862 B.addStackAlignmentAttr(8);
4863 Fn->addAttributes(llvm::AttributeSet::FunctionIndex,
4864 llvm::AttributeSet::get(CGM.getLLVMContext(),
4865 llvm::AttributeSet::FunctionIndex,
B));
}
4870 class WindowsARMTargetCodeGenInfo : public ARMTargetCodeGenInfo {
4871 void addStackProbeSizeTargetAttribute(const Decl *D, llvm::GlobalValue *GV,
4872 CodeGen::CodeGenModule &CGM) const;
4875 WindowsARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
4876 : ARMTargetCodeGenInfo(CGT, K) {}
4878 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
4879 CodeGen::CodeGenModule &CGM) const override;
4882 void WindowsARMTargetCodeGenInfo::addStackProbeSizeTargetAttribute(
4883 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
4884 if (!isa<FunctionDecl>(D))
return;
4886 if (CGM.getCodeGenOpts().StackProbeSize == 4096)
return;
4889 llvm::Function *F = cast<llvm::Function>(GV);
4890 F->addFnAttr("stack-probe-size",
4891 llvm::utostr(CGM.getCodeGenOpts().StackProbeSize));
4894 void WindowsARMTargetCodeGenInfo::setTargetAttributes(
4895 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &CGM) const {
4896 ARMTargetCodeGenInfo::setTargetAttributes(D, GV, CGM);
4897 addStackProbeSizeTargetAttribute(D, GV, CGM);
4901 void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
4902 if (!getCXXABI().classifyReturnType(FI))
4903 FI.getReturnInfo() =
4904 classifyReturnType(FI.getReturnType(), FI.isVariadic());
4906 for (auto &I : FI.arguments())
4907 I.info = classifyArgumentType(I.type, FI.isVariadic());
4909 // Always honor user-specified calling convention.
4910 if (FI.getCallingConvention() != llvm::CallingConv::C)
return;
4913 llvm::CallingConv::ID cc = getRuntimeCC();
4914 if (cc != llvm::CallingConv::C)
4915 FI.setEffectiveCallingConvention(cc);
4918 /// Return the default calling convention that LLVM will use.
4919 llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const {
4920 // The default calling convention that LLVM will infer.
4921 if (isEABIHF() || getTarget().getTriple().isWatchOS())
4922 return llvm::CallingConv::ARM_AAPCS_VFP;
else if (isEABI())
4924 return llvm::CallingConv::ARM_AAPCS;
else
4926 return llvm::CallingConv::ARM_APCS;
}
4929 /// Return the calling convention that our ABI would like us to use
4930 /// as the C calling convention.
4931 llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const {
4932 switch (getABIKind()) {
4933 case APCS: return llvm::CallingConv::ARM_APCS;
4934 case AAPCS: return llvm::CallingConv::ARM_AAPCS;
4935 case AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
4936 case AAPCS16_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
4938 llvm_unreachable("bad ABI kind");
4941 void ARMABIInfo::setCCs() {
4942 assert(getRuntimeCC() == llvm::CallingConv::C);
4944 // Don't muddy up the IR with a ton of explicit annotations if
4945 // they'd just match what LLVM will infer from the triple.
4946 llvm::CallingConv::ID abiCC = getABIDefaultCC();
4947 if (abiCC != getLLVMDefaultCC())
RuntimeCC = abiCC;
4950 // AAPCS apparently requires runtime support functions to be soft-float, but
4951 // that's almost certainly for historic reasons (Thumb1 not supporting VFP
4952 // most likely). It's more convenient for AAPCS16_VFP to be hard-float.
4953 switch (getABIKind()) {
case APCS:
case AAPCS16_VFP:
4956 if (abiCC != getLLVMDefaultCC())
BuiltinCC = abiCC;
break;
case AAPCS:
case AAPCS_VFP:
4961 BuiltinCC = llvm::CallingConv::ARM_AAPCS;
break;
}
}
4966 ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty,
4967 bool isVariadic) const {
4968 // 6.1.2.1 The following argument types are VFP CPRCs:
4969 // A single-precision floating-point type (including promoted
4970 // half-precision types); A double-precision floating-point type;
4971 // A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate
4972 // with a Base Type of a single- or double-precision floating-point type,
4973 // 64-bit containerized vectors or 128-bit containerized vectors with one
4974 // to four Elements.
4975 bool IsEffectivelyAAPCS_VFP = getABIKind() == AAPCS_VFP && !isVariadic;
4977 Ty = useFirstFieldIfTransparentUnion(Ty);
4979 // Handle illegal vector types here.
4980 if (isIllegalVectorType(Ty)) {
4981 uint64_t Size = getContext().getTypeSize(Ty);
if (Size <= 32) {
4983 llvm::Type *ResType =
4984 llvm::Type::getInt32Ty(getVMContext());
4985 return ABIArgInfo::getDirect(ResType);
}
if (Size == 64) {
4988 llvm::Type *ResType = llvm::VectorType::get(
4989 llvm::Type::getInt32Ty(getVMContext()), 2);
4990 return ABIArgInfo::getDirect(ResType);
}
if (Size == 128) {
4993 llvm::Type *ResType = llvm::VectorType::get(
4994 llvm::Type::getInt32Ty(getVMContext()), 4);
4995 return ABIArgInfo::getDirect(ResType);
}
4997 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
}
5000 // __fp16 gets passed as if it were an int or float, but with the top 16 bits
5001 // unspecified. This is not done for OpenCL as it handles the half type
5002 // natively, and does not need to interwork with AAPCS code.
5003 if (Ty->isHalfType() && !getContext().getLangOpts().OpenCL) {
5004 llvm::Type *ResType = IsEffectivelyAAPCS_VFP ?
5005 llvm::Type::getFloatTy(getVMContext()) :
5006 llvm::Type::getInt32Ty(getVMContext());
5007 return ABIArgInfo::getDirect(ResType);
5010 if (!isAggregateTypeForABI(Ty)) {
5011 // Treat an enum type as its underlying type.
5012 if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
5013 Ty = EnumTy->getDecl()->getIntegerType();
}
5016 return (Ty->isPromotableIntegerType() ? ABIArgInfo::getExtend()
5017 : ABIArgInfo::getDirect());
5020 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
5021 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
5024 // Ignore empty records.
5025 if (isEmptyRecord(getContext(), Ty, true))
5026 return ABIArgInfo::getIgnore();
5028 if (IsEffectivelyAAPCS_VFP) {
5029 // Homogeneous Aggregates need to be expanded when we can fit the aggregate
5030 // into VFP registers.
5031 const Type *Base = nullptr;
5032 uint64_t Members = 0;
5033 if (isHomogeneousAggregate(Ty, Base, Members)) {
5034 assert(Base && "Base class should be set for homogeneous aggregate");
5035 // Base can be a floating-point or a vector.
5036 return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
5038 } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
5039 // WatchOS does have homogeneous aggregates. Note that we intentionally use
5040 // this convention even for a variadic function: the backend will use GPRs
// if needed.
5042 const Type *Base = nullptr;
5043 uint64_t Members = 0;
5044 if (isHomogeneousAggregate(Ty, Base, Members)) {
5045 assert(Base && Members <= 4 && "unexpected homogeneous aggregate");
llvm::Type *Ty =
5047 llvm::ArrayType::get(CGT.ConvertType(QualType(Base, 0)), Members);
5048 return ABIArgInfo::getDirect(Ty, 0, nullptr, false);
5052 if (getABIKind() == ARMABIInfo::AAPCS16_VFP &&
5053 getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(16)) {
5054 // WatchOS is adopting the 64-bit AAPCS rule on composite types: if they're
5055 // bigger than 128-bits, they get placed in space allocated by the caller,
5056 // and a pointer is passed.
5057 return ABIArgInfo::getIndirect(
5058 CharUnits::fromQuantity(getContext().getTypeAlign(Ty) / 8), false);
5061 // Support byval for ARM.
5062 // The ABI alignment for APCS is 4-byte and for AAPCS at least 4-byte and at
5063 // most 8-byte. We realign the indirect argument if type alignment is bigger
5064 // than ABI alignment.
5065 uint64_t ABIAlign = 4;
5066 uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8;
5067 if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
5068 getABIKind() == ARMABIInfo::AAPCS)
5069 ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
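
// e.g. (illustrative): a 72-byte struct with 16-byte type alignment under
// AAPCS gets ABIAlign = 8 and is passed byval with realignment, since its
// type alignment exceeds the ABI alignment.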
5071 if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) {
5072 assert(getABIKind() != ARMABIInfo::AAPCS16_VFP && "unexpected byval");
5073 return ABIArgInfo::getIndirect(CharUnits::fromQuantity(ABIAlign),
/*ByVal=*/true,
5075 /*Realign=*/TyAlign > ABIAlign);
}
5078 // Otherwise, pass by coercing to a structure of the appropriate size.
llvm::Type* ElemTy;
unsigned SizeRegs;
5081 // FIXME: Try to match the types of the arguments more accurately where
// we can.
5083 if (getContext().getTypeAlign(Ty) <= 32) {
5084 ElemTy = llvm::Type::getInt32Ty(getVMContext());
5085 SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
} else {
5087 ElemTy = llvm::Type::getInt64Ty(getVMContext());
5088 SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
}
5091 return ABIArgInfo::getDirect(llvm::ArrayType::get(ElemTy, SizeRegs));
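
// e.g. (a sketch): a 12-byte struct with 4-byte alignment is coerced to
// [3 x i32] and occupies three 32-bit argument slots.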
5094 static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
5095 llvm::LLVMContext &VMContext) {
5096 // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
5097 // is called integer-like if its size is less than or equal to one word, and
5098 // the offset of each of its addressable sub-fields is zero.
5100 uint64_t Size = Context.getTypeSize(Ty);
5102 // Check that the type fits in a word.
if (Size > 32)
return false;
5106 // FIXME: Handle vector types!
5107 if (Ty->isVectorType())
return false;
5110 // Float types are never treated as "integer like".
5111 if (Ty->isRealFloatingType())
return false;
5114 // If this is a builtin or pointer type then it is ok.
5115 if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
return true;
5118 // Small complex integer types are "integer like".
5119 if (const ComplexType *CT = Ty->getAs<ComplexType>())
5120 return isIntegerLikeType(CT->getElementType(), Context, VMContext);
5122 // Single-element and zero-sized arrays should be allowed by the definition
5123 // above, but they are not.
5125 // Otherwise, it must be a record type.
5126 const RecordType *RT = Ty->getAs<RecordType>();
5127 if (!RT) return false;
5129 // Ignore records with flexible arrays.
5130 const RecordDecl *RD = RT->getDecl();
5131 if (RD->hasFlexibleArrayMember())
5134 // Check that all sub-fields are at offset 0, and are themselves "integer
5136 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
5138 bool HadField = false;
unsigned idx = 0;
5140 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
5141 i != e; ++i, ++idx) {
5142 const FieldDecl *FD = *i;
5144 // Bit-fields are not addressable, we only need to verify they are "integer
5145 // like". We still have to disallow a subsequent non-bitfield, for example:
5146 // struct { int : 0; int x }
5147 // is non-integer like according to gcc.
5148 if (FD->isBitField()) {
if (!RD->isUnion())
HadField = true;

5152 if (!isIntegerLikeType(FD->getType(), Context, VMContext))
return false;

continue;
}
5158 // Check if this field is at offset 0.
5159 if (Layout.getFieldOffset(idx) != 0)
5162 if (!isIntegerLikeType(FD->getType(), Context, VMContext))
5165 // Only allow at most one field in a structure. This doesn't match the
5166 // wording above, but follows gcc in situations with a field following an
5168 if (!RD->isUnion()) {
5179 ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
5180 bool isVariadic) const {
5181 bool IsEffectivelyAAPCS_VFP =
5182 (getABIKind() == AAPCS_VFP || getABIKind() == AAPCS16_VFP) && !isVariadic;
5184 if (RetTy->isVoidType())
5185 return ABIArgInfo::getIgnore();
5187 // Large vector types should be returned via memory.
5188 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) {
5189 return getNaturalAlignIndirect(RetTy);
5190 }
5192 // __fp16 gets returned as if it were an int or float, but with the top 16
5193 // bits unspecified. This is not done for OpenCL as it handles the half type
5194 // natively, and does not need to interwork with AAPCS code.
5195 if (RetTy->isHalfType() && !getContext().getLangOpts().OpenCL) {
5196 llvm::Type *ResType = IsEffectivelyAAPCS_VFP ?
5197 llvm::Type::getFloatTy(getVMContext()) :
5198 llvm::Type::getInt32Ty(getVMContext());
5199 return ABIArgInfo::getDirect(ResType);
5200 }
5202 if (!isAggregateTypeForABI(RetTy)) {
5203 // Treat an enum type as its underlying type.
5204 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
5205 RetTy = EnumTy->getDecl()->getIntegerType();
5207 return RetTy->isPromotableIntegerType() ? ABIArgInfo::getExtend()
5208 : ABIArgInfo::getDirect();
5209 }
5211 // Are we following APCS?
5212 if (getABIKind() == APCS) {
5213 if (isEmptyRecord(getContext(), RetTy, false))
5214 return ABIArgInfo::getIgnore();
5216 // Complex types are all returned as packed integers.
5218 // FIXME: Consider using 2 x vector types if the back end handles them
5219 // correctly.
5220 if (RetTy->isAnyComplexType())
5221 return ABIArgInfo::getDirect(llvm::IntegerType::get(
5222 getVMContext(), getContext().getTypeSize(RetTy)));
5224 // Integer like structures are returned in r0.
5225 if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
5226 // Return in the smallest viable integer type.
5227 uint64_t Size = getContext().getTypeSize(RetTy);
5228 if (Size <= 8)
5229 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
5230 if (Size <= 16)
5231 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
5232 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
5233 }
5235 // Otherwise return in memory.
5236 return getNaturalAlignIndirect(RetTy);
5237 }
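// For example, under APCS 'struct { char c; }' is integer-like and comes
// back directly in r0 as an i8, while 'struct { float f; }' fails the
// integer-like test above and is returned in memory.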
5239 // Otherwise this is an AAPCS variant.
5241 if (isEmptyRecord(getContext(), RetTy, true))
5242 return ABIArgInfo::getIgnore();
5244 // Check for homogeneous aggregates with AAPCS-VFP.
5245 if (IsEffectivelyAAPCS_VFP) {
5246 const Type *Base = nullptr;
5247 uint64_t Members = 0;
5248 if (isHomogeneousAggregate(RetTy, Base, Members)) {
5249 assert(Base && "Base class should be set for homogeneous aggregate");
5250 // Homogeneous Aggregates are returned directly.
5251 return ABIArgInfo::getDirect(nullptr, 0, nullptr, false);
5252 }
5253 }
5255 // Aggregates <= 4 bytes are returned in r0; other aggregates
5256 // are returned indirectly.
5257 uint64_t Size = getContext().getTypeSize(RetTy);
5258 if (Size <= 32) {
5259 if (getDataLayout().isBigEndian())
5260 // Return in a 32-bit integer type (as if loaded by LDR, AAPCS 5.4).
5261 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
5263 // Return in the smallest viable integer type.
5264 if (Size <= 8)
5265 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
5266 if (Size <= 16)
5267 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
5268 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
5269 } else if (Size <= 128 && getABIKind() == AAPCS16_VFP) {
5270 llvm::Type *Int32Ty = llvm::Type::getInt32Ty(getVMContext());
5271 llvm::Type *CoerceTy =
5272 llvm::ArrayType::get(Int32Ty, llvm::RoundUpToAlignment(Size, 32) / 32);
5273 return ABIArgInfo::getDirect(CoerceTy);
5274 }
5276 return getNaturalAlignIndirect(RetTy);
5277 }
5279 /// isIllegalVector - check whether Ty is an illegal vector type.
5280 bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
5281 if (const VectorType *VT = Ty->getAs<VectorType> ()) {
5282 if (isAndroid()) {
5283 // Android shipped using Clang 3.1, which supported a slightly different
5284 // vector ABI. The primary differences were that 3-element vector types
5285 // were legal, and so were sub 32-bit vectors (i.e. <2 x i8>). This path
5286 // accepts that legacy behavior for Android only.
5287 // Check whether VT is legal.
5288 unsigned NumElements = VT->getNumElements();
5289 // NumElements should be power of 2 or equal to 3.
5290 if (!llvm::isPowerOf2_32(NumElements) && NumElements != 3)
5291 return true;
5292 } else {
5293 // Check whether VT is legal.
5294 unsigned NumElements = VT->getNumElements();
5295 uint64_t Size = getContext().getTypeSize(VT);
5296 // NumElements should be power of 2.
5297 if (!llvm::isPowerOf2_32(NumElements))
5298 return true;
5299 // Size should be greater than 32 bits.
5300 return Size <= 32;
5301 }
5302 }
5303 return false;
5304 }
5306 bool ARMABIInfo::isHomogeneousAggregateBaseType(QualType Ty) const {
5307 // Homogeneous aggregates for AAPCS-VFP must have base types of float,
5308 // double, or 64-bit or 128-bit vectors.
5309 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
5310 if (BT->getKind() == BuiltinType::Float ||
5311 BT->getKind() == BuiltinType::Double ||
5312 BT->getKind() == BuiltinType::LongDouble)
5313 return true;
5314 } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
5315 unsigned VecSize = getContext().getTypeSize(VT);
5316 if (VecSize == 64 || VecSize == 128)
5317 return true;
5318 }
5319 return false;
5320 }
5322 bool ARMABIInfo::isHomogeneousAggregateSmallEnough(const Type *Base,
5323 uint64_t Members) const {
5324 return Members <= 4;
5325 }
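// For example, 'struct Vec4 { float x, y, z, w; };' is a homogeneous
// aggregate with Base == float and Members == 4, so it qualifies for the
// VFP registers; a fifth float member would disqualify it.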
5327 Address ARMABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
5328 QualType Ty) const {
5329 CharUnits SlotSize = CharUnits::fromQuantity(4);
5331 // Empty records are ignored for parameter passing purposes.
5332 if (isEmptyRecord(getContext(), Ty, true)) {
5333 Address Addr(CGF.Builder.CreateLoad(VAListAddr), SlotSize);
5334 Addr = CGF.Builder.CreateElementBitCast(Addr, CGF.ConvertTypeForMem(Ty));
5335 return Addr;
5336 }
5338 auto TyInfo = getContext().getTypeInfoInChars(Ty);
5339 CharUnits TyAlignForABI = TyInfo.second;
5341 // Use indirect if size of the illegal vector is bigger than 16 bytes.
5342 bool IsIndirect = false;
5343 const Type *Base = nullptr;
5344 uint64_t Members = 0;
5345 if (TyInfo.first > CharUnits::fromQuantity(16) && isIllegalVectorType(Ty)) {
5346 IsIndirect = true;
5348 // ARMv7k passes structs bigger than 16 bytes indirectly, in space
5349 // allocated by the caller.
5350 } else if (TyInfo.first > CharUnits::fromQuantity(16) &&
5351 getABIKind() == ARMABIInfo::AAPCS16_VFP &&
5352 !isHomogeneousAggregate(Ty, Base, Members)) {
5353 IsIndirect = true;
5355 // Otherwise, bound the type's ABI alignment.
5356 // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for
5357 // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte.
5358 // Our callers should be prepared to handle an under-aligned address.
5359 } else if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
5360 getABIKind() == ARMABIInfo::AAPCS) {
5361 TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
5362 TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(8));
5363 } else if (getABIKind() == ARMABIInfo::AAPCS16_VFP) {
5364 // ARMv7k allows type alignment up to 16 bytes.
5365 TyAlignForABI = std::max(TyAlignForABI, CharUnits::fromQuantity(4));
5366 TyAlignForABI = std::min(TyAlignForABI, CharUnits::fromQuantity(16));
5367 } else {
5368 TyAlignForABI = CharUnits::fromQuantity(4);
5369 }
5370 TyInfo.second = TyAlignForABI;
5372 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, IsIndirect, TyInfo,
5373 SlotSize, /*AllowHigherAlign*/ true);
5374 }
5376 //===----------------------------------------------------------------------===//
5377 // NVPTX ABI Implementation
5378 //===----------------------------------------------------------------------===//
5380 namespace {
5382 class NVPTXABIInfo : public ABIInfo {
5383 public:
5384 NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
5386 ABIArgInfo classifyReturnType(QualType RetTy) const;
5387 ABIArgInfo classifyArgumentType(QualType Ty) const;
5389 void computeInfo(CGFunctionInfo &FI) const override;
5390 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
5391 QualType Ty) const override;
5392 };
5394 class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
5395 public:
5396 NVPTXTargetCodeGenInfo(CodeGenTypes &CGT)
5397 : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {}
5399 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5400 CodeGen::CodeGenModule &M) const override;
5401 private:
5402 // Adds a NamedMDNode with F, Name, and Operand as operands, and adds the
5403 // resulting MDNode to the nvvm.annotations MDNode.
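// For example, addNVVMMetadata(F, "kernel", 1) appends the node
// !{<F>, !"kernel", i32 1} to the module-level !nvvm.annotations list.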
5404 static void addNVVMMetadata(llvm::Function *F, StringRef Name, int Operand);
5405 };
5407 ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
5408 if (RetTy->isVoidType())
5409 return ABIArgInfo::getIgnore();
5411 // note: this is different from default ABI
5412 if (!RetTy->isScalarType())
5413 return ABIArgInfo::getDirect();
5415 // Treat an enum type as its underlying type.
5416 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
5417 RetTy = EnumTy->getDecl()->getIntegerType();
5419 return (RetTy->isPromotableIntegerType() ?
5420 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
5421 }
5423 ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
5424 // Treat an enum type as its underlying type.
5425 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
5426 Ty = EnumTy->getDecl()->getIntegerType();
5428 // Return aggregate types as indirect, by value.
5429 if (isAggregateTypeForABI(Ty))
5430 return getNaturalAlignIndirect(Ty, /* byval */ true);
5432 return (Ty->isPromotableIntegerType() ?
5433 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
5434 }
5436 void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
5437 if (!getCXXABI().classifyReturnType(FI))
5438 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
5439 for (auto &I : FI.arguments())
5440 I.info = classifyArgumentType(I.type);
5442 // Always honor user-specified calling convention.
5443 if (FI.getCallingConvention() != llvm::CallingConv::C)
5444 return;
5446 FI.setEffectiveCallingConvention(getRuntimeCC());
5447 }
5449 Address NVPTXABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
5450 QualType Ty) const {
5451 llvm_unreachable("NVPTX does not support varargs");
5452 }
5454 void NVPTXTargetCodeGenInfo::
5455 setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5456 CodeGen::CodeGenModule &M) const {
5457 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
5458 if (!FD) return;
5460 llvm::Function *F = cast<llvm::Function>(GV);
5462 // Perform special handling in OpenCL mode
5463 if (M.getLangOpts().OpenCL) {
5464 // Use OpenCL function attributes to check for kernel functions
5465 // By default, all functions are device functions
5466 if (FD->hasAttr<OpenCLKernelAttr>()) {
5467 // OpenCL __kernel functions get kernel metadata
5468 // Create !{<func-ref>, metadata !"kernel", i32 1} node
5469 addNVVMMetadata(F, "kernel", 1);
5470 // And kernel functions are not subject to inlining
5471 F->addFnAttr(llvm::Attribute::NoInline);
5472 }
5473 }
5475 // Perform special handling in CUDA mode.
5476 if (M.getLangOpts().CUDA) {
5477 // CUDA __global__ functions get a kernel metadata entry. Since
5478 // __global__ functions cannot be called from the device, we do not
5479 // need to set the noinline attribute.
5480 if (FD->hasAttr<CUDAGlobalAttr>()) {
5481 // Create !{<func-ref>, metadata !"kernel", i32 1} node
5482 addNVVMMetadata(F, "kernel", 1);
5483 }
5484 if (CUDALaunchBoundsAttr *Attr = FD->getAttr<CUDALaunchBoundsAttr>()) {
5485 // Create !{<func-ref>, metadata !"maxntidx", i32 <val>} node
5486 llvm::APSInt MaxThreads(32);
5487 MaxThreads = Attr->getMaxThreads()->EvaluateKnownConstInt(M.getContext());
5488 if (MaxThreads > 0)
5489 addNVVMMetadata(F, "maxntidx", MaxThreads.getExtValue());
5491 // min blocks is an optional argument for CUDALaunchBoundsAttr. If it was
5492 // not specified in __launch_bounds__ or if the user specified a 0 value,
5493 // we don't have to add a PTX directive.
5494 if (Attr->getMinBlocks()) {
5495 llvm::APSInt MinBlocks(32);
5496 MinBlocks = Attr->getMinBlocks()->EvaluateKnownConstInt(M.getContext());
5497 if (MinBlocks > 0)
5498 // Create !{<func-ref>, metadata !"minctasm", i32 <val>} node
5499 addNVVMMetadata(F, "minctasm", MinBlocks.getExtValue());
5500 }
5501 }
5502 }
5503 }
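// For example, a kernel annotated with __launch_bounds__(256, 4) is emitted
// with "maxntidx" == 256 and "minctasm" == 4 in the nvvm.annotations metadata.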
5505 void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::Function *F, StringRef Name,
5506 int Operand) {
5507 llvm::Module *M = F->getParent();
5508 llvm::LLVMContext &Ctx = M->getContext();
5510 // Get "nvvm.annotations" metadata node
5511 llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations");
5513 llvm::Metadata *MDVals[] = {
5514 llvm::ConstantAsMetadata::get(F), llvm::MDString::get(Ctx, Name),
5515 llvm::ConstantAsMetadata::get(
5516 llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand))};
5517 // Append metadata to nvvm.annotations
5518 MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
5519 }
5521 }
5522 //===----------------------------------------------------------------------===//
5523 // SystemZ ABI Implementation
5524 //===----------------------------------------------------------------------===//
5526 namespace {
5528 class SystemZABIInfo : public ABIInfo {
5529 bool HasVector;
5531 public:
5532 SystemZABIInfo(CodeGenTypes &CGT, bool HV)
5533 : ABIInfo(CGT), HasVector(HV) {}
5535 bool isPromotableIntegerType(QualType Ty) const;
5536 bool isCompoundType(QualType Ty) const;
5537 bool isVectorArgumentType(QualType Ty) const;
5538 bool isFPArgumentType(QualType Ty) const;
5539 QualType GetSingleElementType(QualType Ty) const;
5541 ABIArgInfo classifyReturnType(QualType RetTy) const;
5542 ABIArgInfo classifyArgumentType(QualType ArgTy) const;
5544 void computeInfo(CGFunctionInfo &FI) const override {
5545 if (!getCXXABI().classifyReturnType(FI))
5546 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
5547 for (auto &I : FI.arguments())
5548 I.info = classifyArgumentType(I.type);
5549 }
5551 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
5552 QualType Ty) const override;
5553 };
5555 class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
5556 public:
5557 SystemZTargetCodeGenInfo(CodeGenTypes &CGT, bool HasVector)
5558 : TargetCodeGenInfo(new SystemZABIInfo(CGT, HasVector)) {}
5559 };
5561 }
5563 bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const {
5564 // Treat an enum type as its underlying type.
5565 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
5566 Ty = EnumTy->getDecl()->getIntegerType();
5568 // Promotable integer types are required to be promoted by the ABI.
5569 if (Ty->isPromotableIntegerType())
5570 return true;
5572 // 32-bit values must also be promoted.
5573 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
5574 switch (BT->getKind()) {
5575 case BuiltinType::Int:
5576 case BuiltinType::UInt:
5577 return true;
5578 default:
5579 return false;
5580 }
5582 return false;
5583 }
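// Note: a 32-bit 'int' is therefore sign-extended and an 'unsigned int'
// zero-extended to a full 64-bit GPR when passed or returned.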
5584 bool SystemZABIInfo::isCompoundType(QualType Ty) const {
5585 return (Ty->isAnyComplexType() ||
5586 Ty->isVectorType() ||
5587 isAggregateTypeForABI(Ty));
5588 }
5590 bool SystemZABIInfo::isVectorArgumentType(QualType Ty) const {
5591 return (HasVector &&
5592 Ty->isVectorType() &&
5593 getContext().getTypeSize(Ty) <= 128);
5594 }
5596 bool SystemZABIInfo::isFPArgumentType(QualType Ty) const {
5597 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
5598 switch (BT->getKind()) {
5599 case BuiltinType::Float:
5600 case BuiltinType::Double:
5601 return true;
5602 default:
5603 return false;
5604 }
5606 return false;
5607 }
5609 QualType SystemZABIInfo::GetSingleElementType(QualType Ty) const {
5610 if (const RecordType *RT = Ty->getAsStructureType()) {
5611 const RecordDecl *RD = RT->getDecl();
5612 QualType Found;
5614 // If this is a C++ record, check the bases first.
5615 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
5616 for (const auto &I : CXXRD->bases()) {
5617 QualType Base = I.getType();
5619 // Empty bases don't affect things either way.
5620 if (isEmptyRecord(getContext(), Base, true))
5621 continue;
5623 if (!Found.isNull())
5624 return Ty;
5625 Found = GetSingleElementType(Base);
5626 }
5628 // Check the fields.
5629 for (const auto *FD : RD->fields()) {
5630 // For compatibility with GCC, ignore empty bitfields in C++ mode.
5631 // Unlike isSingleElementStruct(), empty structure and array fields
5632 // do count. So do anonymous bitfields that aren't zero-sized.
5633 if (getContext().getLangOpts().CPlusPlus &&
5634 FD->isBitField() && FD->getBitWidthValue(getContext()) == 0)
5635 continue;
5637 // Unlike isSingleElementStruct(), arrays do not count.
5638 // Nested structures still do though.
5639 if (!Found.isNull())
5640 return Ty;
5641 Found = GetSingleElementType(FD->getType());
5642 }
5644 // Unlike isSingleElementStruct(), trailing padding is allowed.
5645 // An 8-byte aligned struct s { float f; } is passed as a double.
5646 if (!Found.isNull())
5647 return Found;
5648 }
5650 return Ty;
5651 }
5653 Address SystemZABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
5654 QualType Ty) const {
5655 // Assume that va_list type is correct; should be pointer to LLVM type:
5656 // struct {
5657 // i64 __gpr;
5658 // i64 __fpr;
5659 // i8 *__overflow_arg_area;
5660 // i8 *__reg_save_area;
5661 // };
5663 // Every non-vector argument occupies 8 bytes and is passed by preference
5664 // in either GPRs or FPRs. Vector arguments occupy 8 or 16 bytes and are
5665 // always passed on the stack.
5666 Ty = getContext().getCanonicalType(Ty);
5667 auto TyInfo = getContext().getTypeInfoInChars(Ty);
5668 llvm::Type *ArgTy = CGF.ConvertTypeForMem(Ty);
5669 llvm::Type *DirectTy = ArgTy;
5670 ABIArgInfo AI = classifyArgumentType(Ty);
5671 bool IsIndirect = AI.isIndirect();
5672 bool InFPRs = false;
5673 bool IsVector = false;
5674 CharUnits UnpaddedSize;
5675 CharUnits DirectAlign;
5676 if (IsIndirect) {
5677 DirectTy = llvm::PointerType::getUnqual(DirectTy);
5678 UnpaddedSize = DirectAlign = CharUnits::fromQuantity(8);
5679 } else {
5680 if (AI.getCoerceToType())
5681 ArgTy = AI.getCoerceToType();
5682 InFPRs = ArgTy->isFloatTy() || ArgTy->isDoubleTy();
5683 IsVector = ArgTy->isVectorTy();
5684 UnpaddedSize = TyInfo.first;
5685 DirectAlign = TyInfo.second;
5686 }
5687 CharUnits PaddedSize = CharUnits::fromQuantity(8);
5688 if (IsVector && UnpaddedSize > PaddedSize)
5689 PaddedSize = CharUnits::fromQuantity(16);
5690 assert((UnpaddedSize <= PaddedSize) && "Invalid argument size.");
5692 CharUnits Padding = (PaddedSize - UnpaddedSize);
5694 llvm::Type *IndexTy = CGF.Int64Ty;
5695 llvm::Value *PaddedSizeV =
5696 llvm::ConstantInt::get(IndexTy, PaddedSize.getQuantity());
5698 if (IsVector) {
5699 // Work out the address of a vector argument on the stack.
5700 // Vector arguments are always passed in the high bits of a
5701 // single (8 byte) or double (16 byte) stack slot.
5702 Address OverflowArgAreaPtr =
5703 CGF.Builder.CreateStructGEP(VAListAddr, 2, CharUnits::fromQuantity(16),
5704 "overflow_arg_area_ptr");
5705 Address OverflowArgArea =
5706 Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
5707 TyInfo.second);
5708 Address MemAddr =
5709 CGF.Builder.CreateElementBitCast(OverflowArgArea, DirectTy, "mem_addr");
5711 // Update overflow_arg_area_ptr pointer
5712 llvm::Value *NewOverflowArgArea =
5713 CGF.Builder.CreateGEP(OverflowArgArea.getPointer(), PaddedSizeV,
5714 "overflow_arg_area");
5715 CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
5717 return MemAddr;
5718 }
5720 assert(PaddedSize.getQuantity() == 8);
5722 unsigned MaxRegs, RegCountField, RegSaveIndex;
5723 CharUnits RegPadding;
5724 if (InFPRs) {
5725 MaxRegs = 4; // Maximum of 4 FPR arguments
5726 RegCountField = 1; // __fpr
5727 RegSaveIndex = 16; // save offset for f0
5728 RegPadding = CharUnits(); // floats are passed in the high bits of an FPR
5729 } else {
5730 MaxRegs = 5; // Maximum of 5 GPR arguments
5731 RegCountField = 0; // __gpr
5732 RegSaveIndex = 2; // save offset for r2
5733 RegPadding = Padding; // values are passed in the low bits of a GPR
5734 }
5736 Address RegCountPtr = CGF.Builder.CreateStructGEP(
5737 VAListAddr, RegCountField, RegCountField * CharUnits::fromQuantity(8),
5738 "reg_count_ptr");
5739 llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count");
5740 llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
5741 llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV,
5742 "fits_in_regs");
5744 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
5745 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
5746 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
5747 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
5749 // Emit code to load the value if it was passed in registers.
5750 CGF.EmitBlock(InRegBlock);
5752 // Work out the address of an argument register.
5753 llvm::Value *ScaledRegCount =
5754 CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count");
5755 llvm::Value *RegBase =
5756 llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize.getQuantity()
5757 + RegPadding.getQuantity());
5758 llvm::Value *RegOffset =
5759 CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset");
5760 Address RegSaveAreaPtr =
5761 CGF.Builder.CreateStructGEP(VAListAddr, 3, CharUnits::fromQuantity(24),
5762 "reg_save_area_ptr");
5763 llvm::Value *RegSaveArea =
5764 CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area");
5765 Address RawRegAddr(CGF.Builder.CreateGEP(RegSaveArea, RegOffset,
5766 "raw_reg_addr"),
5767 PaddedSize);
5768 Address RegAddr =
5769 CGF.Builder.CreateElementBitCast(RawRegAddr, DirectTy, "reg_addr");
5771 // Update the register count
5772 llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1);
5773 llvm::Value *NewRegCount =
5774 CGF.Builder.CreateAdd(RegCount, One, "reg_count");
5775 CGF.Builder.CreateStore(NewRegCount, RegCountPtr);
5776 CGF.EmitBranch(ContBlock);
5778 // Emit code to load the value if it was passed in memory.
5779 CGF.EmitBlock(InMemBlock);
5781 // Work out the address of a stack argument.
5782 Address OverflowArgAreaPtr = CGF.Builder.CreateStructGEP(
5783 VAListAddr, 2, CharUnits::fromQuantity(16), "overflow_arg_area_ptr");
5784 Address OverflowArgArea =
5785 Address(CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area"),
5786 TyInfo.second);
5787 Address RawMemAddr =
5788 CGF.Builder.CreateConstByteGEP(OverflowArgArea, Padding, "raw_mem_addr");
5789 Address MemAddr =
5790 CGF.Builder.CreateElementBitCast(RawMemAddr, DirectTy, "mem_addr");
5792 // Update overflow_arg_area_ptr pointer
5793 llvm::Value *NewOverflowArgArea =
5794 CGF.Builder.CreateGEP(OverflowArgArea.getPointer(), PaddedSizeV,
5795 "overflow_arg_area");
5796 CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
5797 CGF.EmitBranch(ContBlock);
5799 // Return the appropriate result.
5800 CGF.EmitBlock(ContBlock);
5801 Address ResAddr = emitMergePHI(CGF, RegAddr, InRegBlock,
5802 MemAddr, InMemBlock, "va_arg.addr");
5804 if (IsIndirect)
5805 ResAddr = Address(CGF.Builder.CreateLoad(ResAddr, "indirect_arg"),
5806 TyInfo.second);
5808 return ResAddr;
5809 }
5811 ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
5812 if (RetTy->isVoidType())
5813 return ABIArgInfo::getIgnore();
5814 if (isVectorArgumentType(RetTy))
5815 return ABIArgInfo::getDirect();
5816 if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64)
5817 return getNaturalAlignIndirect(RetTy);
5818 return (isPromotableIntegerType(RetTy) ?
5819 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
5820 }
5822 ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
5823 // Handle the generic C++ ABI.
5824 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
5825 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
5827 // Integers and enums are extended to full register width.
5828 if (isPromotableIntegerType(Ty))
5829 return ABIArgInfo::getExtend();
5831 // Handle vector types and vector-like structure types. Note that
5832 // as opposed to float-like structure types, we do not allow any
5833 // padding for vector-like structures, so verify the sizes match.
5834 uint64_t Size = getContext().getTypeSize(Ty);
5835 QualType SingleElementTy = GetSingleElementType(Ty);
5836 if (isVectorArgumentType(SingleElementTy) &&
5837 getContext().getTypeSize(SingleElementTy) == Size)
5838 return ABIArgInfo::getDirect(CGT.ConvertType(SingleElementTy));
5840 // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly.
5841 if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
5842 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
5844 // Handle small structures.
5845 if (const RecordType *RT = Ty->getAs<RecordType>()) {
5846 // Structures with flexible arrays have variable length, so they really
5847 // fail the size test above.
5848 const RecordDecl *RD = RT->getDecl();
5849 if (RD->hasFlexibleArrayMember())
5850 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
5852 // The structure is passed as an unextended integer, a float, or a double.
5853 llvm::Type *PassTy;
5854 if (isFPArgumentType(SingleElementTy)) {
5855 assert(Size == 32 || Size == 64);
5856 if (Size == 32)
5857 PassTy = llvm::Type::getFloatTy(getVMContext());
5858 else
5859 PassTy = llvm::Type::getDoubleTy(getVMContext());
5860 } else
5861 PassTy = llvm::IntegerType::get(getVMContext(), Size);
5862 return ABIArgInfo::getDirect(PassTy);
5863 }
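// For example, 'struct { float f; }' is passed in an FPR as 'float', while
// 'struct { char c; }' travels as the unextended integer type i8.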
5865 // Non-structure compounds are passed indirectly.
5866 if (isCompoundType(Ty))
5867 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
5869 return ABIArgInfo::getDirect(nullptr);
5870 }
5872 //===----------------------------------------------------------------------===//
5873 // MSP430 ABI Implementation
5874 //===----------------------------------------------------------------------===//
5876 namespace {
5878 class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
5879 public:
5880 MSP430TargetCodeGenInfo(CodeGenTypes &CGT)
5881 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
5882 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5883 CodeGen::CodeGenModule &M) const override;
5884 };
5886 }
5888 void MSP430TargetCodeGenInfo::setTargetAttributes(const Decl *D,
5889 llvm::GlobalValue *GV,
5890 CodeGen::CodeGenModule &M) const {
5891 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
5892 if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) {
5893 // Handle 'interrupt' attribute:
5894 llvm::Function *F = cast<llvm::Function>(GV);
5896 // Step 1: Set ISR calling convention.
5897 F->setCallingConv(llvm::CallingConv::MSP430_INTR);
5899 // Step 2: Add attributes goodness.
5900 F->addFnAttr(llvm::Attribute::NoInline);
5902 // Step 3: Emit ISR vector alias.
5903 unsigned Num = attr->getNumber() / 2;
5904 llvm::GlobalAlias::create(llvm::Function::ExternalLinkage,
5905 "__isr_" + Twine(Num), F);
5910 //===----------------------------------------------------------------------===//
5911 // MIPS ABI Implementation. This works for both little-endian and
5912 // big-endian variants.
5913 //===----------------------------------------------------------------------===//
5915 namespace {
5916 class MipsABIInfo : public ABIInfo {
5917 bool IsO32;
5918 unsigned MinABIStackAlignInBytes, StackAlignInBytes;
5919 void CoerceToIntArgs(uint64_t TySize,
5920 SmallVectorImpl<llvm::Type *> &ArgList) const;
5921 llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const;
5922 llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const;
5923 llvm::Type* getPaddingType(uint64_t Align, uint64_t Offset) const;
5924 public:
5925 MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) :
5926 ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
5927 StackAlignInBytes(IsO32 ? 8 : 16) {}
5929 ABIArgInfo classifyReturnType(QualType RetTy) const;
5930 ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const;
5931 void computeInfo(CGFunctionInfo &FI) const override;
5932 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
5933 QualType Ty) const override;
5934 bool shouldSignExtUnsignedType(QualType Ty) const override;
5935 };
5937 class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
5938 unsigned SizeOfUnwindException;
5939 public:
5940 MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32)
5941 : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)),
5942 SizeOfUnwindException(IsO32 ? 24 : 32) {}
5944 int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
5945 return 29;
5946 }
5948 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5949 CodeGen::CodeGenModule &CGM) const override {
5950 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
5951 if (!FD) return;
5952 llvm::Function *Fn = cast<llvm::Function>(GV);
5953 if (FD->hasAttr<Mips16Attr>()) {
5954 Fn->addFnAttr("mips16");
5955 }
5956 else if (FD->hasAttr<NoMips16Attr>()) {
5957 Fn->addFnAttr("nomips16");
5958 }
5960 const MipsInterruptAttr *Attr = FD->getAttr<MipsInterruptAttr>();
5961 if (!Attr)
5962 return;
5964 const char *Kind;
5965 switch (Attr->getInterrupt()) {
5966 case MipsInterruptAttr::eic: Kind = "eic"; break;
5967 case MipsInterruptAttr::sw0: Kind = "sw0"; break;
5968 case MipsInterruptAttr::sw1: Kind = "sw1"; break;
5969 case MipsInterruptAttr::hw0: Kind = "hw0"; break;
5970 case MipsInterruptAttr::hw1: Kind = "hw1"; break;
5971 case MipsInterruptAttr::hw2: Kind = "hw2"; break;
5972 case MipsInterruptAttr::hw3: Kind = "hw3"; break;
5973 case MipsInterruptAttr::hw4: Kind = "hw4"; break;
5974 case MipsInterruptAttr::hw5: Kind = "hw5"; break;
5975 }
5977 Fn->addFnAttr("interrupt", Kind);
5978 }
5981 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
5982 llvm::Value *Address) const override;
5984 unsigned getSizeOfUnwindException() const override {
5985 return SizeOfUnwindException;
5986 }
5987 };
5988 }
5990 void MipsABIInfo::CoerceToIntArgs(
5991 uint64_t TySize, SmallVectorImpl<llvm::Type *> &ArgList) const {
5992 llvm::IntegerType *IntTy =
5993 llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);
5995 // Add (TySize / MinABIStackAlignInBytes) args of IntTy.
5996 for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
5997 ArgList.push_back(IntTy);
5999 // If necessary, add one more integer type to ArgList.
6000 unsigned R = TySize % (MinABIStackAlignInBytes * 8);
6002 if (R)
6003 ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
6004 }
6006 // In N32/64, an aligned double precision floating point field is passed in
6007 // a register.
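// For example, 'struct { double d; int i; }' (TySize == 128) becomes the
// coercion type { double, i64 }: the aligned double goes to an FPR while the
// remaining 64 bits (int plus padding) travel as an integer.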
6008 llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const {
6009 SmallVector<llvm::Type*, 8> ArgList, IntArgList;
6011 if (IsO32) {
6012 CoerceToIntArgs(TySize, ArgList);
6013 return llvm::StructType::get(getVMContext(), ArgList);
6014 }
6016 if (Ty->isComplexType())
6017 return CGT.ConvertType(Ty);
6019 const RecordType *RT = Ty->getAs<RecordType>();
6021 // Unions/vectors are passed in integer registers.
6022 if (!RT || !RT->isStructureOrClassType()) {
6023 CoerceToIntArgs(TySize, ArgList);
6024 return llvm::StructType::get(getVMContext(), ArgList);
6025 }
6027 const RecordDecl *RD = RT->getDecl();
6028 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
6029 assert(!(TySize % 8) && "Size of structure must be multiple of 8.");
6031 uint64_t LastOffset = 0;
6032 unsigned idx = 0;
6033 llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64);
6035 // Iterate over fields in the struct/class and check if there are any aligned
6036 // double fields.
6037 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
6038 i != e; ++i, ++idx) {
6039 const QualType Ty = i->getType();
6040 const BuiltinType *BT = Ty->getAs<BuiltinType>();
6042 if (!BT || BT->getKind() != BuiltinType::Double)
6043 continue;
6045 uint64_t Offset = Layout.getFieldOffset(idx);
6046 if (Offset % 64) // Ignore doubles that are not aligned.
6047 continue;
6049 // Add ((Offset - LastOffset) / 64) args of type i64.
6050 for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
6051 ArgList.push_back(I64);
6053 // Add double type.
6054 ArgList.push_back(llvm::Type::getDoubleTy(getVMContext()));
6055 LastOffset = Offset + 64;
6056 }
6058 CoerceToIntArgs(TySize - LastOffset, IntArgList);
6059 ArgList.append(IntArgList.begin(), IntArgList.end());
6061 return llvm::StructType::get(getVMContext(), ArgList);
6062 }
6064 llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset,
6065 uint64_t Offset) const {
6066 if (OrigOffset + MinABIStackAlignInBytes > Offset)
6067 return nullptr;
6069 return llvm::IntegerType::get(getVMContext(), (Offset - OrigOffset) * 8);
6070 }
6072 ABIArgInfo
6073 MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
6074 Ty = useFirstFieldIfTransparentUnion(Ty);
6076 uint64_t OrigOffset = Offset;
6077 uint64_t TySize = getContext().getTypeSize(Ty);
6078 uint64_t Align = getContext().getTypeAlign(Ty) / 8;
6080 Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes),
6081 (uint64_t)StackAlignInBytes);
6082 unsigned CurrOffset = llvm::RoundUpToAlignment(Offset, Align);
6083 Offset = CurrOffset + llvm::RoundUpToAlignment(TySize, Align * 8) / 8;
6085 if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) {
6086 // Ignore empty aggregates.
6087 if (TySize == 0)
6088 return ABIArgInfo::getIgnore();
6090 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
6091 Offset = OrigOffset + MinABIStackAlignInBytes;
6092 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
6093 }
6095 // If we have reached here, aggregates are passed directly by coercing to
6096 // another structure type. Padding is inserted if the offset of the
6097 // aggregate is unaligned.
6098 ABIArgInfo ArgInfo =
6099 ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0,
6100 getPaddingType(OrigOffset, CurrOffset));
6101 ArgInfo.setInReg(true);
6102 return ArgInfo;
6103 }
6105 // Treat an enum type as its underlying type.
6106 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
6107 Ty = EnumTy->getDecl()->getIntegerType();
6109 // All integral types are promoted to the GPR width.
6110 if (Ty->isIntegralOrEnumerationType())
6111 return ABIArgInfo::getExtend();
6113 return ABIArgInfo::getDirect(
6114 nullptr, 0, IsO32 ? nullptr : getPaddingType(OrigOffset, CurrOffset));
6115 }
6117 llvm::Type*
6118 MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
6119 const RecordType *RT = RetTy->getAs<RecordType>();
6120 SmallVector<llvm::Type*, 8> RTList;
6122 if (RT && RT->isStructureOrClassType()) {
6123 const RecordDecl *RD = RT->getDecl();
6124 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
6125 unsigned FieldCnt = Layout.getFieldCount();
6127 // N32/64 returns struct/classes in floating point registers if the
6128 // following conditions are met:
6129 // 1. The size of the struct/class is no larger than 128-bit.
6130 // 2. The struct/class has one or two fields all of which are floating
6131 // point types.
6132 // 3. The offset of the first field is zero (this follows what gcc does).
6134 // Any other composite results are returned in integer registers.
6136 if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) {
6137 RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end();
6138 for (; b != e; ++b) {
6139 const BuiltinType *BT = b->getType()->getAs<BuiltinType>();
6141 if (!BT || !BT->isFloatingPoint())
6142 break;
6144 RTList.push_back(CGT.ConvertType(b->getType()));
6145 }
6147 if (b == e)
6148 return llvm::StructType::get(getVMContext(), RTList,
6149 RD->hasAttr<PackedAttr>());
6151 RTList.clear();
6152 }
6153 }
6155 CoerceToIntArgs(Size, RTList);
6156 return llvm::StructType::get(getVMContext(), RTList);
6157 }
6159 ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
6160 uint64_t Size = getContext().getTypeSize(RetTy);
6162 if (RetTy->isVoidType())
6163 return ABIArgInfo::getIgnore();
6165 // O32 doesn't treat zero-sized structs differently from other structs.
6166 // However, N32/N64 ignores zero sized return values.
6167 if (!IsO32 && Size == 0)
6168 return ABIArgInfo::getIgnore();
6170 if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) {
6171 if (Size <= 128) {
6172 if (RetTy->isAnyComplexType())
6173 return ABIArgInfo::getDirect();
6175 // O32 returns integer vectors in registers and N32/N64 returns all small
6176 // aggregates in registers.
6177 if (!IsO32 ||
6178 (RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())) {
6179 ABIArgInfo ArgInfo =
6180 ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
6181 ArgInfo.setInReg(true);
6182 return ArgInfo;
6183 }
6184 }
6186 return getNaturalAlignIndirect(RetTy);
6187 }
6189 // Treat an enum type as its underlying type.
6190 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
6191 RetTy = EnumTy->getDecl()->getIntegerType();
6193 return (RetTy->isPromotableIntegerType() ?
6194 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
6195 }
6197 void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
6198 ABIArgInfo &RetInfo = FI.getReturnInfo();
6199 if (!getCXXABI().classifyReturnType(FI))
6200 RetInfo = classifyReturnType(FI.getReturnType());
6202 // Check if a pointer to an aggregate is passed as a hidden argument.
6203 uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0;
6205 for (auto &I : FI.arguments())
6206 I.info = classifyArgumentType(I.type, Offset);
6207 }
6209 Address MipsABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6210 QualType OrigTy) const {
6211 QualType Ty = OrigTy;
6213 // Integer arguments are promoted to 32-bit on O32 and 64-bit on N32/N64.
6214 // Pointers are also promoted in the same way but this only matters for N32.
6215 unsigned SlotSizeInBits = IsO32 ? 32 : 64;
6216 unsigned PtrWidth = getTarget().getPointerWidth(0);
6217 bool DidPromote = false;
6218 if ((Ty->isIntegerType() &&
6219 getContext().getIntWidth(Ty) < SlotSizeInBits) ||
6220 (Ty->isPointerType() && PtrWidth < SlotSizeInBits)) {
6221 DidPromote = true;
6222 Ty = getContext().getIntTypeForBitwidth(SlotSizeInBits,
6223 Ty->isSignedIntegerType());
6224 }
6226 auto TyInfo = getContext().getTypeInfoInChars(Ty);
6228 // The alignment of things in the argument area is never larger than
6229 // StackAlignInBytes.
6230 TyInfo.second =
6231 std::min(TyInfo.second, CharUnits::fromQuantity(StackAlignInBytes));
6233 // MinABIStackAlignInBytes is the size of argument slots on the stack.
6234 CharUnits ArgSlotSize = CharUnits::fromQuantity(MinABIStackAlignInBytes);
6236 Address Addr = emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
6237 TyInfo, ArgSlotSize, /*AllowHigherAlign*/ true);
6240 // If there was a promotion, "unpromote" into a temporary.
6241 // TODO: can we just use a pointer into a subset of the original slot?
6242 if (DidPromote) {
6243 Address Temp = CGF.CreateMemTemp(OrigTy, "vaarg.promotion-temp");
6244 llvm::Value *Promoted = CGF.Builder.CreateLoad(Addr);
6246 // Truncate down to the right width.
6247 llvm::Type *IntTy = (OrigTy->isIntegerType() ? Temp.getElementType()
6248 : CGF.IntPtrTy);
6249 llvm::Value *V = CGF.Builder.CreateTrunc(Promoted, IntTy);
6250 if (OrigTy->isPointerType())
6251 V = CGF.Builder.CreateIntToPtr(V, Temp.getElementType());
6253 CGF.Builder.CreateStore(V, Temp);
6254 Addr = Temp;
6255 }
6257 return Addr;
6258 }
6260 bool MipsABIInfo::shouldSignExtUnsignedType(QualType Ty) const {
6261 int TySize = getContext().getTypeSize(Ty);
6263 // MIPS64 ABI requires unsigned 32 bit integers to be sign extended.
6264 if (Ty->isUnsignedIntegerOrEnumerationType() && TySize == 32)
6265 return true;
6267 return false;
6268 }
6270 bool
6271 MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
6272 llvm::Value *Address) const {
6273 // This information comes from gcc's implementation, which seems to
6274 // be as canonical as it gets.
6276 // Everything on MIPS is 4 bytes. Double-precision FP registers
6277 // are aliased to pairs of single-precision FP registers.
6278 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
6280 // 0-31 are the general purpose registers, $0 - $31.
6281 // 32-63 are the floating-point registers, $f0 - $f31.
6282 // 64 and 65 are the multiply/divide registers, $hi and $lo.
6283 // 66 is the (notional, I think) register for signal-handler return.
6284 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);
6286 // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
6287 // They are one bit wide and ignored here.
6289 // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
6290 // (coprocessor 1 is the FP unit)
6291 // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
6292 // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
6293 // 176-181 are the DSP accumulator registers.
6294 AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);
6296 return false;
6297 }
6298 //===----------------------------------------------------------------------===//
6299 // TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
6300 // Currently subclassed only to implement custom OpenCL C function attribute
6301 // handling.
6302 //===----------------------------------------------------------------------===//
6304 namespace {
6306 class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo {
6307 public:
6308 TCETargetCodeGenInfo(CodeGenTypes &CGT)
6309 : DefaultTargetCodeGenInfo(CGT) {}
6311 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
6312 CodeGen::CodeGenModule &M) const override;
6313 };
6315 void TCETargetCodeGenInfo::setTargetAttributes(
6316 const Decl *D, llvm::GlobalValue *GV, CodeGen::CodeGenModule &M) const {
6317 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
6318 if (!FD) return;
6320 llvm::Function *F = cast<llvm::Function>(GV);
6322 if (M.getLangOpts().OpenCL) {
6323 if (FD->hasAttr<OpenCLKernelAttr>()) {
6324 // OpenCL C Kernel functions are not subject to inlining
6325 F->addFnAttr(llvm::Attribute::NoInline);
6326 const ReqdWorkGroupSizeAttr *Attr = FD->getAttr<ReqdWorkGroupSizeAttr>();
6327 if (Attr) {
6328 // Convert the reqd_work_group_size() attributes to metadata.
6329 llvm::LLVMContext &Context = F->getContext();
6330 llvm::NamedMDNode *OpenCLMetadata =
6331 M.getModule().getOrInsertNamedMetadata(
6332 "opencl.kernel_wg_size_info");
6334 SmallVector<llvm::Metadata *, 5> Operands;
6335 Operands.push_back(llvm::ConstantAsMetadata::get(F));
6337 Operands.push_back(
6338 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
6339 M.Int32Ty, llvm::APInt(32, Attr->getXDim()))));
6340 Operands.push_back(
6341 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
6342 M.Int32Ty, llvm::APInt(32, Attr->getYDim()))));
6343 Operands.push_back(
6344 llvm::ConstantAsMetadata::get(llvm::Constant::getIntegerValue(
6345 M.Int32Ty, llvm::APInt(32, Attr->getZDim()))));
6347 // Add a boolean constant operand for "required" (true) or "hint"
6348 // (false) for implementing the work_group_size_hint attr later.
6349 // Currently always true as the hint is not yet implemented.
6350 Operands.push_back(
6351 llvm::ConstantAsMetadata::get(llvm::ConstantInt::getTrue(Context)));
6352 OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
6353 }
6354 }
6355 }
6356 }
6358 }
6360 //===----------------------------------------------------------------------===//
6361 // Hexagon ABI Implementation
6362 //===----------------------------------------------------------------------===//
6364 namespace {
6366 class HexagonABIInfo : public ABIInfo {
6369 public:
6370 HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
6372 private:
6374 ABIArgInfo classifyReturnType(QualType RetTy) const;
6375 ABIArgInfo classifyArgumentType(QualType RetTy) const;
6377 void computeInfo(CGFunctionInfo &FI) const override;
6379 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6380 QualType Ty) const override;
6381 };
6383 class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
6384 public:
6385 HexagonTargetCodeGenInfo(CodeGenTypes &CGT)
6386 :TargetCodeGenInfo(new HexagonABIInfo(CGT)) {}
6388 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
6389 return 29;
6390 }
6391 };
6393 }
6395 void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
6396 if (!getCXXABI().classifyReturnType(FI))
6397 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
6398 for (auto &I : FI.arguments())
6399 I.info = classifyArgumentType(I.type);
6400 }
6402 ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const {
6403 if (!isAggregateTypeForABI(Ty)) {
6404 // Treat an enum type as its underlying type.
6405 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
6406 Ty = EnumTy->getDecl()->getIntegerType();
6408 return (Ty->isPromotableIntegerType() ?
6409 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
6410 }
6412 // Ignore empty records.
6413 if (isEmptyRecord(getContext(), Ty, true))
6414 return ABIArgInfo::getIgnore();
6416 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
6417 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
6419 uint64_t Size = getContext().getTypeSize(Ty);
6420 if (Size > 64)
6421 return getNaturalAlignIndirect(Ty, /*ByVal=*/true);
6422 // Pass in the smallest viable integer type.
6423 else if (Size > 32)
6424 return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
6425 else if (Size > 16)
6426 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
6427 else if (Size > 8)
6428 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
6429 else
6430 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
6431 }
6433 ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
6434 if (RetTy->isVoidType())
6435 return ABIArgInfo::getIgnore();
6437 // Large vector types should be returned via memory.
6438 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64)
6439 return getNaturalAlignIndirect(RetTy);
6441 if (!isAggregateTypeForABI(RetTy)) {
6442 // Treat an enum type as its underlying type.
6443 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
6444 RetTy = EnumTy->getDecl()->getIntegerType();
6446 return (RetTy->isPromotableIntegerType() ?
6447 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
6448 }
6450 if (isEmptyRecord(getContext(), RetTy, true))
6451 return ABIArgInfo::getIgnore();
6453 // Aggregates <= 8 bytes are returned in r0; other aggregates
6454 // are returned indirectly.
6455 uint64_t Size = getContext().getTypeSize(RetTy);
6456 if (Size <= 64) {
6457 // Return in the smallest viable integer type.
6458 if (Size <= 8)
6459 return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
6460 if (Size <= 16)
6461 return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
6462 if (Size <= 32)
6463 return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
6464 return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
6465 }
6467 return getNaturalAlignIndirect(RetTy, /*ByVal=*/true);
6468 }
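// For example, a 6-byte aggregate (48 bits) is returned directly as i64,
// while a 12-byte aggregate exceeds 64 bits and is returned via memory.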
6470 Address HexagonABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6471 QualType Ty) const {
6472 // FIXME: Someone needs to audit that this handles alignment correctly.
6473 return emitVoidPtrVAArg(CGF, VAListAddr, Ty, /*indirect*/ false,
6474 getContext().getTypeInfoInChars(Ty),
6475 CharUnits::fromQuantity(4),
6476 /*AllowHigherAlign*/ true);
6477 }
6479 //===----------------------------------------------------------------------===//
6480 // AMDGPU ABI Implementation
6481 //===----------------------------------------------------------------------===//
6483 namespace {
6485 class AMDGPUTargetCodeGenInfo : public TargetCodeGenInfo {
6486 public:
6487 AMDGPUTargetCodeGenInfo(CodeGenTypes &CGT)
6488 : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
6489 void setTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
6490 CodeGen::CodeGenModule &M) const override;
6491 };
6493 }
6495 void AMDGPUTargetCodeGenInfo::setTargetAttributes(
6496 const Decl *D,
6497 llvm::GlobalValue *GV,
6498 CodeGen::CodeGenModule &M) const {
6499 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D);
6500 if (!FD)
6501 return;
6503 if (const auto Attr = FD->getAttr<AMDGPUNumVGPRAttr>()) {
6504 llvm::Function *F = cast<llvm::Function>(GV);
6505 uint32_t NumVGPR = Attr->getNumVGPR();
6506 if (NumVGPR != 0)
6507 F->addFnAttr("amdgpu_num_vgpr", llvm::utostr(NumVGPR));
6508 }
6510 if (const auto Attr = FD->getAttr<AMDGPUNumSGPRAttr>()) {
6511 llvm::Function *F = cast<llvm::Function>(GV);
6512 unsigned NumSGPR = Attr->getNumSGPR();
6513 if (NumSGPR != 0)
6514 F->addFnAttr("amdgpu_num_sgpr", llvm::utostr(NumSGPR));
6515 }
6516 }
6519 //===----------------------------------------------------------------------===//
6520 // SPARC v9 ABI Implementation.
6521 // Based on the SPARC Compliance Definition version 2.4.1.
6523 // Function arguments are mapped to a nominal "parameter array" and promoted to
6524 // registers depending on their type. Each argument occupies 8 or 16 bytes in
6525 // the array, structs larger than 16 bytes are passed indirectly.
6527 // One case requires special care:
6528 //
6529 // struct mixed {
6530 // int i;
6531 // float f;
6532 // };
6533 //
6534 // When a struct mixed is passed by value, it only occupies 8 bytes in the
6535 // parameter array, but the int is passed in an integer register, and the float
6536 // is passed in a floating point register. This is represented as two arguments
6537 // with the LLVM IR inreg attribute:
6539 // declare void f(i32 inreg %i, float inreg %f)
6541 // The code generator will only allocate 4 bytes from the parameter array for
6542 // the inreg arguments. All other arguments are allocated a multiple of 8
6543 // bytes.
6545 namespace {
6546 class SparcV9ABIInfo : public ABIInfo {
6547 public:
6548 SparcV9ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}
6550 private:
6551 ABIArgInfo classifyType(QualType RetTy, unsigned SizeLimit) const;
6552 void computeInfo(CGFunctionInfo &FI) const override;
6553 Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6554 QualType Ty) const override;
6556 // Coercion type builder for structs passed in registers. The coercion type
6557 // serves two purposes:
6559 // 1. Pad structs to a multiple of 64 bits, so they are passed 'left-aligned'
6560 // in registers.
6561 // 2. Expose aligned floating point elements as first-level elements, so the
6562 // code generator knows to pass them in floating point registers.
6564 // We also compute the InReg flag which indicates that the struct contains
6565 // aligned 32-bit floats.
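// For the 'struct mixed' example above, the builder yields the literal type
// { i32, float } with InReg set, exposing the float to the backend as a
// first-class FP element instead of hiding it inside an i64 word.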
6567 struct CoerceBuilder {
6568 llvm::LLVMContext &Context;
6569 const llvm::DataLayout &DL;
6570 SmallVector<llvm::Type*, 8> Elems;
6571 uint64_t Size;
6572 bool InReg;
6574 CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl)
6575 : Context(c), DL(dl), Size(0), InReg(false) {}
6577 // Pad Elems with integers until Size is ToSize.
6578 void pad(uint64_t ToSize) {
6579 assert(ToSize >= Size && "Cannot remove elements");
6580 if (ToSize == Size)
6581 return;
6583 // Finish the current 64-bit word.
6584 uint64_t Aligned = llvm::RoundUpToAlignment(Size, 64);
6585 if (Aligned > Size && Aligned <= ToSize) {
6586 Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size));
6587 Size = Aligned;
6588 }
6590 // Add whole 64-bit words.
6591 while (Size + 64 <= ToSize) {
6592 Elems.push_back(llvm::Type::getInt64Ty(Context));
6593 Size += 64;
6594 }
6596 // Final in-word padding.
6597 if (Size < ToSize) {
6598 Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size));
6599 Size = ToSize;
6600 }
6601 }
6603 // Add a floating point element at Offset.
6604 void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) {
6605 // Unaligned floats are treated as integers.
6606 if (Offset % Bits)
6607 return;
6608 // The InReg flag is only required if there are any floats < 64 bits.
6609 if (Bits < 64)
6610 InReg = true;
6611 pad(Offset);
6612 Elems.push_back(Ty);
6613 Size = Offset + Bits;
6614 }
6616 // Add a struct type to the coercion type, starting at Offset (in bits).
6617 void addStruct(uint64_t Offset, llvm::StructType *StrTy) {
6618 const llvm::StructLayout *Layout = DL.getStructLayout(StrTy);
6619 for (unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) {
6620 llvm::Type *ElemTy = StrTy->getElementType(i);
6621 uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i);
6622 switch (ElemTy->getTypeID()) {
6623 case llvm::Type::StructTyID:
6624 addStruct(ElemOffset, cast<llvm::StructType>(ElemTy));
6625 break;
6626 case llvm::Type::FloatTyID:
6627 addFloat(ElemOffset, ElemTy, 32);
6628 break;
6629 case llvm::Type::DoubleTyID:
6630 addFloat(ElemOffset, ElemTy, 64);
6631 break;
6632 case llvm::Type::FP128TyID:
6633 addFloat(ElemOffset, ElemTy, 128);
6634 break;
6635 case llvm::Type::PointerTyID:
6636 if (ElemOffset % 64 == 0) {
6637 pad(ElemOffset);
6638 Elems.push_back(ElemTy);
6639 Size += 64;
6640 }
6641 break;
6642 default:
6643 break;
6644 }
6645 }
6646 }
6648 // Check if Ty is a usable substitute for the coercion type.
6649 bool isUsableType(llvm::StructType *Ty) const {
6650 return llvm::makeArrayRef(Elems) == Ty->elements();
6651 }
6653 // Get the coercion type as a literal struct type.
6654 llvm::Type *getType() const {
6655 if (Elems.size() == 1)
6656 return Elems.front();
6658 return llvm::StructType::get(Context, Elems);
6659 }
6660 };
6662 } // end anonymous namespace
6664 ABIArgInfo
6665 SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
6666 if (Ty->isVoidType())
6667 return ABIArgInfo::getIgnore();
6669 uint64_t Size = getContext().getTypeSize(Ty);
6671 // Anything too big to fit in registers is passed with an explicit indirect
6672 // pointer / sret pointer.
6673 if (Size > SizeLimit)
6674 return getNaturalAlignIndirect(Ty, /*ByVal=*/false);
6676 // Treat an enum type as its underlying type.
6677 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
6678 Ty = EnumTy->getDecl()->getIntegerType();
6680 // Integer types smaller than a register are extended.
6681 if (Size < 64 && Ty->isIntegerType())
6682 return ABIArgInfo::getExtend();
6684 // Other non-aggregates go in registers.
6685 if (!isAggregateTypeForABI(Ty))
6686 return ABIArgInfo::getDirect();
6688 // If a C++ object has either a non-trivial copy constructor or a non-trivial
6689 // destructor, it is passed with an explicit indirect pointer / sret pointer.
6690 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
6691 return getNaturalAlignIndirect(Ty, RAA == CGCXXABI::RAA_DirectInMemory);
6693 // This is a small aggregate type that should be passed in registers.
6694 // Build a coercion type from the LLVM struct type.
6695 llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty));
6696 if (!StrTy)
6697 return ABIArgInfo::getDirect();
6699 CoerceBuilder CB(getVMContext(), getDataLayout());
6700 CB.addStruct(0, StrTy);
6701 CB.pad(llvm::RoundUpToAlignment(CB.DL.getTypeSizeInBits(StrTy), 64));
6703 // Try to use the original type for coercion.
6704 llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType();
6706 if (CB.InReg)
6707 return ABIArgInfo::getDirectInReg(CoerceTy);
6708 else
6709 return ABIArgInfo::getDirect(CoerceTy);
6710 }
6712 Address SparcV9ABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
6713 QualType Ty) const {
6714 ABIArgInfo AI = classifyType(Ty, 16 * 8);
6715 llvm::Type *ArgTy = CGT.ConvertType(Ty);
6716 if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
6717 AI.setCoerceToType(ArgTy);
6719 CharUnits SlotSize = CharUnits::fromQuantity(8);
6721 CGBuilderTy &Builder = CGF.Builder;
6722 Address Addr(Builder.CreateLoad(VAListAddr, "ap.cur"), SlotSize);
6723 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
6725 auto TypeInfo = getContext().getTypeInfoInChars(Ty);
6727 Address ArgAddr = Address::invalid();
6728 CharUnits Stride;
6729 switch (AI.getKind()) {
6730 case ABIArgInfo::Expand:
6731 case ABIArgInfo::InAlloca:
6732 llvm_unreachable("Unsupported ABI kind for va_arg");
6734 case ABIArgInfo::Extend: {
6735 Stride = SlotSize;
6736 CharUnits Offset = SlotSize - TypeInfo.first;
6737 ArgAddr = Builder.CreateConstInBoundsByteGEP(Addr, Offset, "extend");
6738 break;
6739 }
6741 case ABIArgInfo::Direct: {
6742 auto AllocSize = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
6743 Stride = CharUnits::fromQuantity(AllocSize).RoundUpToAlignment(SlotSize);
6744 ArgAddr = Addr;
6745 break;
6746 }
6748 case ABIArgInfo::Indirect:
6749 Stride = SlotSize;
6750 ArgAddr = Builder.CreateElementBitCast(Addr, ArgPtrTy, "indirect");
6751 ArgAddr = Address(Builder.CreateLoad(ArgAddr, "indirect.arg"),
6752 TypeInfo.second);
6753 break;
6755 case ABIArgInfo::Ignore:
6756 return Address(llvm::UndefValue::get(ArgPtrTy), TypeInfo.second);
6757 }
6759 // Update VAList.
6760 llvm::Value *NextPtr =
6761 Builder.CreateConstInBoundsByteGEP(Addr.getPointer(), Stride, "ap.next");
6762 Builder.CreateStore(NextPtr, VAListAddr);
6764 return Builder.CreateBitCast(ArgAddr, ArgPtrTy, "arg.addr");
6765 }
6767 void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const {
6768 FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8);
6769 for (auto &I : FI.arguments())
6770 I.info = classifyType(I.type, 16 * 8);
6771 }
6773 namespace {
6774 class SparcV9TargetCodeGenInfo : public TargetCodeGenInfo {
6775 public:
6776 SparcV9TargetCodeGenInfo(CodeGenTypes &CGT)
6777 : TargetCodeGenInfo(new SparcV9ABIInfo(CGT)) {}
6779 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
6780 return 14;
6781 }
6783 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
6784 llvm::Value *Address) const override;
6785 };
6786 } // end anonymous namespace
6788 bool
6789 SparcV9TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
6790 llvm::Value *Address) const {
6791 // This is calculated from the LLVM and GCC tables and verified
6792 // against gcc output. AFAIK all ABIs use the same encoding.
6794 CodeGen::CGBuilderTy &Builder = CGF.Builder;
6796 llvm::IntegerType *i8 = CGF.Int8Ty;
6797 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
6798 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
6800 // 0-31: the 8-byte general-purpose registers
6801 AssignToArrayRange(Builder, Address, Eight8, 0, 31);
6803 // 32-63: f0-31, the 4-byte floating-point registers
6804 AssignToArrayRange(Builder, Address, Four8, 32, 63);
6806 // Y = 64
6807 // PSR = 65
6808 // WIM = 66
6809 // TBR = 67
6810 // PC = 68
6811 // NPC = 69
6812 // FSR = 70
6813 // CSR = 71
6814 AssignToArrayRange(Builder, Address, Eight8, 64, 71);
6816 // 72-87: d0-15, the 8-byte floating-point registers
6817 AssignToArrayRange(Builder, Address, Eight8, 72, 87);
6819 return false;
6820 }
6823 //===----------------------------------------------------------------------===//
6824 // XCore ABI Implementation
6825 //===----------------------------------------------------------------------===//
6827 namespace {
6829 /// A SmallStringEnc instance is used to build up the TypeString by passing
6830 /// it by reference between functions that append to it.
6831 typedef llvm::SmallString<128> SmallStringEnc;
6833 /// TypeStringCache caches the meta encodings of Types.
6835 /// The reason for caching TypeStrings is two fold:
6836 /// 1. To cache a type's encoding for later uses;
6837 /// 2. As a means to break recursive member type inclusion.
6839 /// A cache Entry can have a Status of:
6840 /// NonRecursive: The type encoding is not recursive;
6841 /// Recursive: The type encoding is recursive;
6842 /// Incomplete: An incomplete TypeString;
6843 /// IncompleteUsed: An incomplete TypeString that has been used in a
6844 /// Recursive type encoding.
6846 /// A NonRecursive entry will have all of its sub-members expanded as fully
6847 /// as possible. Whilst it may contain types which are recursive, the type
6848 /// itself is not recursive and thus its encoding may be safely used whenever
6849 /// the type is encountered.
6851 /// A Recursive entry will have all of its sub-members expanded as fully as
6852 /// possible. The type itself is recursive and it may contain other types which
6853 /// are recursive. The Recursive encoding must not be used during the expansion
6854 /// of a recursive type's recursive branch. For simplicity the code uses
6855 /// IncompleteCount to reject all usage of Recursive encodings for member types.
6857 /// An Incomplete entry is always a RecordType and only encodes its
6858 /// identifier e.g. "s(S){}". Incomplete 'StubEnc' entries are ephemeral and
6859 /// are placed into the cache during type expansion as a means to identify and
6860 /// handle recursive inclusion of types as sub-members. If there is recursion
6861 /// the entry becomes IncompleteUsed.
6863 /// During the expansion of a RecordType's members:
6865 /// If the cache contains a NonRecursive encoding for the member type, the
6866 /// cached encoding is used;
6868 /// If the cache contains a Recursive encoding for the member type, the
6869 /// cached encoding is 'Swapped' out, as it may be incorrect, and...
6871 /// If the member is a RecordType, an Incomplete encoding is placed into the
6872 /// cache to break potential recursive inclusion of itself as a sub-member;
6874 /// Once a member RecordType has been expanded, its temporary incomplete
6875 /// entry is removed from the cache. If a Recursive encoding was swapped out
6876 /// it is swapped back in;
6878 /// If an incomplete entry is used to expand a sub-member, the incomplete
6879 /// entry is marked as IncompleteUsed. The cache keeps count of how many
6880 /// IncompleteUsed entries it currently contains in IncompleteUsedCount;
6882 /// If a member's encoding is found to be a NonRecursive or Recursive viz:
6883 /// IncompleteUsedCount==0, the member's encoding is added to the cache.
6884 /// Else the member is part of a recursive type and thus the recursion has
6885 /// been exited too soon for the encoding to be correct for the member.
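///
/// As an informal illustration (a hedged sketch; the normative grammar is in
/// the Tools Development Guide referenced below), expanding the
/// self-referential C type
///
///   struct S { struct S *Next; };
///
/// first caches the Incomplete stub "s(S){}"; when the member walk reaches S
/// again it uses that stub, the entry becomes IncompleteUsed, and the final
/// encoding for S is cached as Recursive.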
class TypeStringCache {
  enum Status {NonRecursive, Recursive, Incomplete, IncompleteUsed};
  struct Entry {
    std::string Str;     // The encoded TypeString for the type.
    enum Status State;   // Information about the encoding in 'Str'.
    std::string Swapped; // A temporary placeholder for a Recursive encoding
                         // during the expansion of RecordType's members.
  };
  std::map<const IdentifierInfo *, struct Entry> Map;
  unsigned IncompleteCount;     // Number of Incomplete entries in the Map.
  unsigned IncompleteUsedCount; // Number of IncompleteUsed entries in the Map.
public:
  TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {}
  void addIncomplete(const IdentifierInfo *ID, std::string StubEnc);
  bool removeIncomplete(const IdentifierInfo *ID);
  void addIfComplete(const IdentifierInfo *ID, StringRef Str,
                     bool IsRecursive);
  StringRef lookupStr(const IdentifierInfo *ID);
};
/// TypeString encodings for enum & union fields must be ordered.
/// FieldEncoding is a helper for this ordering process.
class FieldEncoding {
  bool HasName;
  std::string Enc;
public:
  FieldEncoding(bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {}
  StringRef str() { return Enc; }
  bool operator<(const FieldEncoding &rhs) const {
    if (HasName != rhs.HasName) return HasName;
    return Enc < rhs.Enc;
  }
};
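
// A hedged sketch of the ordering: named fields sort ahead of unnamed ones,
// and otherwise the encoded strings compare alphanumerically, e.g.
//
//   SmallStringEnc A("m(a){si}"), B("m(b){si}");
//   FieldEncoding FA(true, A), FB(true, B);
//   assert(FA < FB && "ordered by encoding when both are named");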
class XCoreABIInfo : public DefaultABIInfo {
public:
  XCoreABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
  Address EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                    QualType Ty) const override;
};

class XCoreTargetCodeGenInfo : public TargetCodeGenInfo {
  mutable TypeStringCache TSC;
public:
  XCoreTargetCodeGenInfo(CodeGenTypes &CGT)
      : TargetCodeGenInfo(new XCoreABIInfo(CGT)) {}
  void emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
                    CodeGen::CodeGenModule &M) const override;
};
} // End anonymous namespace.
Address XCoreABIInfo::EmitVAArg(CodeGenFunction &CGF, Address VAListAddr,
                                QualType Ty) const {
  CGBuilderTy &Builder = CGF.Builder;

  // Get the VAList.
  CharUnits SlotSize = CharUnits::fromQuantity(4);
  Address AP(Builder.CreateLoad(VAListAddr), SlotSize);

  // Handle the argument.
  ABIArgInfo AI = classifyArgumentType(Ty);
  CharUnits TypeAlign = getContext().getTypeAlignInChars(Ty);
  llvm::Type *ArgTy = CGT.ConvertType(Ty);
  if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
    AI.setCoerceToType(ArgTy);
  llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);

  Address Val = Address::invalid();
  CharUnits ArgSize = CharUnits::Zero();
  switch (AI.getKind()) {
  case ABIArgInfo::Expand:
  case ABIArgInfo::InAlloca:
    llvm_unreachable("Unsupported ABI kind for va_arg");
  case ABIArgInfo::Ignore:
    Val = Address(llvm::UndefValue::get(ArgPtrTy), TypeAlign);
    ArgSize = CharUnits::Zero();
    break;
  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    Val = Builder.CreateBitCast(AP, ArgPtrTy);
    ArgSize = CharUnits::fromQuantity(
        getDataLayout().getTypeAllocSize(AI.getCoerceToType()));
    ArgSize = ArgSize.RoundUpToAlignment(SlotSize);
    break;
  case ABIArgInfo::Indirect:
    Val = Builder.CreateElementBitCast(AP, ArgPtrTy);
    Val = Address(Builder.CreateLoad(Val), TypeAlign);
    ArgSize = SlotSize;
    break;
  }

  // Increment the VAList.
  if (!ArgSize.isZero()) {
    llvm::Value *APN =
        Builder.CreateConstInBoundsByteGEP(AP.getPointer(), ArgSize);
    Builder.CreateStore(APN, VAListAddr);
  }

  return Val;
}
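
// A hedged illustration of the resulting convention: on XCore every va_list
// slot is 4 bytes, so for C code like
//
//   #include <stdarg.h>
//   int Sum(int N, ...) {
//     va_list AP;
//     va_start(AP, N);
//     int S = 0;
//     while (N--)
//       S += va_arg(AP, int); // Direct: read in place, then AP += 4
//     va_end(AP);
//     return S;
//   }
//
// each Direct argument is read from the current slot and the pointer advances
// by the rounded-up slot size, while an Indirect argument instead loads a
// pointer from the slot and reads the value through it.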
/// During the expansion of a RecordType, an incomplete TypeString is placed
/// into the cache as a means to identify and break recursion.
/// If there is a Recursive encoding in the cache, it is swapped out and will
/// be reinserted by removeIncomplete().
/// All other types of encoding should have been used rather than arriving here.
void TypeStringCache::addIncomplete(const IdentifierInfo *ID,
                                    std::string StubEnc) {
  if (!ID)
    return;
  Entry &E = Map[ID];
  assert((E.Str.empty() || E.State == Recursive) &&
         "Incorrect use of addIncomplete");
  assert(!StubEnc.empty() && "Passing an empty string to addIncomplete()");
  E.Swapped.swap(E.Str); // Swap out any Recursive encoding.
  E.Str.swap(StubEnc);
  E.State = Incomplete;
  ++IncompleteCount;
}
/// Once the RecordType has been expanded, the temporary incomplete TypeString
/// must be removed from the cache.
/// If a Recursive was swapped out by addIncomplete(), it will be replaced.
/// Returns true if the RecordType was defined recursively.
bool TypeStringCache::removeIncomplete(const IdentifierInfo *ID) {
  if (!ID)
    return false;
  auto I = Map.find(ID);
  assert(I != Map.end() && "Entry not present");
  Entry &E = I->second;
  assert((E.State == Incomplete ||
          E.State == IncompleteUsed) &&
         "Entry must be an incomplete type");
  bool IsRecursive = false;
  if (E.State == IncompleteUsed) {
    // We made use of our Incomplete encoding, thus we are recursive.
    IsRecursive = true;
    --IncompleteUsedCount;
  }
  if (E.Swapped.empty())
    Map.erase(I); // The temporary entry is no longer needed.
  else {
    // Swap the Recursive back.
    E.Swapped.swap(E.Str);
    E.Swapped.clear();
    E.State = Recursive;
  }
  --IncompleteCount;
  return IsRecursive;
}
/// Add the encoded TypeString to the cache only if it is NonRecursive or
/// Recursive (viz: all sub-members were expanded as fully as possible).
void TypeStringCache::addIfComplete(const IdentifierInfo *ID, StringRef Str,
                                    bool IsRecursive) {
  if (!ID || IncompleteUsedCount)
    return; // No key, or it is an incomplete sub-type, so don't add.
  Entry &E = Map[ID];
  if (IsRecursive && !E.Str.empty()) {
    assert(E.State == Recursive && E.Str.size() == Str.size() &&
           "This is not the same Recursive entry");
    // The parent container was not recursive after all, so we could have used
    // this Recursive sub-member entry after all, but we assumed the worst when
    // we started viz: IncompleteCount!=0.
    return;
  }
  assert(E.Str.empty() && "Entry already present");
  E.Str = Str.str();
  E.State = IsRecursive ? Recursive : NonRecursive;
}
/// Return a cached TypeString encoding for the ID. If there isn't one, or we
/// are recursively expanding a type (IncompleteCount != 0) and the cached
/// encoding is Recursive, return an empty StringRef.
StringRef TypeStringCache::lookupStr(const IdentifierInfo *ID) {
  if (!ID)
    return StringRef(); // We have no key.
  auto I = Map.find(ID);
  if (I == Map.end())
    return StringRef(); // We have no encoding.
  Entry &E = I->second;
  if (E.State == Recursive && IncompleteCount)
    return StringRef(); // We don't use Recursive encodings for member types.

  if (E.State == Incomplete) {
    // The incomplete type is being used to break out of recursion.
    E.State = IncompleteUsed;
    ++IncompleteUsedCount;
  }
  return E.Str.c_str();
}
/// The XCore ABI includes a type information section that communicates symbol
/// type information to the linker. The linker uses this information to verify
/// safety/correctness of things such as array bounds and pointers.
/// The ABI only requires C (and XC) language modules to emit TypeStrings.
/// This type information (TypeString) is emitted into metadata for all global
/// symbols: definitions, declarations, functions & variables.
///
/// The TypeString carries type, qualifier, name, size & value details.
/// Please see 'Tools Development Guide' section 2.16.2 for format details:
/// https://www.xmos.com/download/public/Tools-Development-Guide%28X9114A%29.pdf
/// The output is tested by test/CodeGen/xcore-stringtype.c.
static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
                          CodeGen::CodeGenModule &CGM, TypeStringCache &TSC);
/// XCore uses emitTargetMD to emit TypeString metadata for global symbols.
void XCoreTargetCodeGenInfo::emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
                                          CodeGen::CodeGenModule &CGM) const {
  SmallStringEnc Enc;
  if (getTypeString(Enc, D, CGM, TSC)) {
    llvm::LLVMContext &Ctx = CGM.getModule().getContext();
    llvm::SmallVector<llvm::Metadata *, 2> MDVals;
    MDVals.push_back(llvm::ConstantAsMetadata::get(GV));
    MDVals.push_back(llvm::MDString::get(Ctx, Enc.str()));
    llvm::NamedMDNode *MD =
        CGM.getModule().getOrInsertNamedMetadata("xcore.typestrings");
    MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
  }
}
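
// A hedged sketch of the emitted IR: each global that gets a TypeString adds
// a (GlobalValue, MDString) pair to the named metadata node, roughly
//
//   !xcore.typestrings = !{!0, !1}
//   !0 = !{i32 (i32)* @f, !"...typestring for f..."}
//   !1 = !{i32* @g, !"...typestring for g..."}
//
// with the exact string contents defined by the Tools Development Guide.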
static bool appendType(SmallStringEnc &Enc, QualType QType,
                       const CodeGen::CodeGenModule &CGM,
                       TypeStringCache &TSC);
/// Helper function for appendRecordType().
/// Builds a SmallVector containing the encoded field types in declaration
/// order.
static bool extractFieldType(SmallVectorImpl<FieldEncoding> &FE,
                             const RecordDecl *RD,
                             const CodeGen::CodeGenModule &CGM,
                             TypeStringCache &TSC) {
  for (const auto *Field : RD->fields()) {
    SmallStringEnc Enc;
    Enc += "m(";
    Enc += Field->getName();
    Enc += "){";
    if (Field->isBitField()) {
      Enc += "b(";
      llvm::raw_svector_ostream OS(Enc);
      OS << Field->getBitWidthValue(CGM.getContext());
      Enc += ':';
    }
    if (!appendType(Enc, Field->getType(), CGM, TSC))
      return false;
    if (Field->isBitField())
      Enc += ')';
    Enc += '}';
    FE.emplace_back(!Field->getName().empty(), Enc);
  }
  return true;
}
/// Appends structure and union types to Enc and adds encoding to cache.
/// Recursively calls appendType (via extractFieldType) for each field.
/// Union types have their fields ordered according to the ABI.
static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT,
                             const CodeGen::CodeGenModule &CGM,
                             TypeStringCache &TSC, const IdentifierInfo *ID) {
  // Append the cached TypeString if we have one.
  StringRef TypeString = TSC.lookupStr(ID);
  if (!TypeString.empty()) {
    Enc += TypeString;
    return true;
  }

  // Start to emit an incomplete TypeString.
  size_t Start = Enc.size();
  Enc += (RT->isUnionType() ? 'u' : 's');
  Enc += '(';
  if (ID)
    Enc += ID->getName();
  Enc += "){";

  // We collect all encoded fields and order as necessary.
  bool IsRecursive = false;
  const RecordDecl *RD = RT->getDecl()->getDefinition();
  if (RD && !RD->field_empty()) {
    // An incomplete TypeString stub is placed in the cache for this RecordType
    // so that recursive calls to this RecordType will use it whilst building a
    // complete TypeString for this RecordType.
    SmallVector<FieldEncoding, 16> FE;
    std::string StubEnc(Enc.substr(Start).str());
    StubEnc += '}'; // StubEnc now holds a valid incomplete TypeString.
    TSC.addIncomplete(ID, std::move(StubEnc));
    if (!extractFieldType(FE, RD, CGM, TSC)) {
      (void) TSC.removeIncomplete(ID);
      return false;
    }
    IsRecursive = TSC.removeIncomplete(ID);
    // The ABI requires unions to be sorted but not structures.
    // See FieldEncoding::operator< for the sort algorithm.
    if (RT->isUnionType())
      std::sort(FE.begin(), FE.end());
    // We can now complete the TypeString.
    unsigned E = FE.size();
    for (unsigned I = 0; I != E; ++I) {
      if (I)
        Enc += ',';
      Enc += FE[I].str();
    }
  }
  Enc += '}';
  TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive);
  return true;
}
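
// A hedged worked example: for "struct S { int A; struct S *B; };" the
// expansion proceeds roughly as follows:
//   1. the Incomplete stub "s(S){}" is cached for S;
//   2. field A encodes as "m(A){si}";
//   3. field B hits the stub, so it encodes as "m(B){p(s(S){})}" and marks
//      the expansion recursive;
//   4. the completed "s(S){m(A){si},m(B){p(s(S){})}}" is cached as Recursive.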
/// Appends enum types to Enc and adds the encoding to the cache.
static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET,
                           TypeStringCache &TSC,
                           const IdentifierInfo *ID) {
  // Append the cached TypeString if we have one.
  StringRef TypeString = TSC.lookupStr(ID);
  if (!TypeString.empty()) {
    Enc += TypeString;
    return true;
  }

  size_t Start = Enc.size();
  Enc += "e(";
  if (ID)
    Enc += ID->getName();
  Enc += "){";

  // We collect all encoded enumerations and order them alphanumerically.
  if (const EnumDecl *ED = ET->getDecl()->getDefinition()) {
    SmallVector<FieldEncoding, 16> FE;
    for (auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E;
         ++I) {
      SmallStringEnc EnumEnc;
      EnumEnc += "m(";
      EnumEnc += I->getName();
      EnumEnc += "){";
      I->getInitVal().toString(EnumEnc);
      EnumEnc += '}';
      FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc));
    }
    std::sort(FE.begin(), FE.end());
    unsigned E = FE.size();
    for (unsigned I = 0; I != E; ++I) {
      if (I)
        Enc += ',';
      Enc += FE[I].str();
    }
  }
  Enc += '}';
  TSC.addIfComplete(ID, Enc.substr(Start), false);
  return true;
}
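
// A hedged example: "enum E { B = 1, A = 2 };" encodes along the lines of
// "e(E){m(A){2},m(B){1}}", since the enumerators are ordered alphanumerically
// rather than by declaration order or value.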
/// Appends type's qualifier to Enc.
/// This is done prior to appending the type's encoding.
static void appendQualifier(SmallStringEnc &Enc, QualType QT) {
  // Qualifiers are emitted in alphabetical order.
  static const char *const Table[] = {"","c:","r:","cr:","v:","cv:","rv:","crv:"};
  int Lookup = 0;
  if (QT.isConstQualified())
    Lookup += 1<<0;
  if (QT.isRestrictQualified())
    Lookup += 1<<1;
  if (QT.isVolatileQualified())
    Lookup += 1<<2;
  Enc += Table[Lookup];
}
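
// A hedged example: "const volatile int" sets Lookup to 1 + 4 == 5, so
// Table[5] ("cv:") is emitted ahead of the base type, giving "cv:si".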
/// Appends built-in types to Enc.
static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT) {
  const char *EncType;
  switch (BT->getKind()) {
  case BuiltinType::Void:       EncType = "0";   break;
  case BuiltinType::Bool:       EncType = "b";   break;
  case BuiltinType::Char_U:     EncType = "uc";  break;
  case BuiltinType::UChar:      EncType = "uc";  break;
  case BuiltinType::SChar:      EncType = "sc";  break;
  case BuiltinType::UShort:     EncType = "us";  break;
  case BuiltinType::Short:      EncType = "ss";  break;
  case BuiltinType::UInt:       EncType = "ui";  break;
  case BuiltinType::Int:        EncType = "si";  break;
  case BuiltinType::ULong:      EncType = "ul";  break;
  case BuiltinType::Long:       EncType = "sl";  break;
  case BuiltinType::ULongLong:  EncType = "ull"; break;
  case BuiltinType::LongLong:   EncType = "sll"; break;
  case BuiltinType::Float:      EncType = "ft";  break;
  case BuiltinType::Double:     EncType = "d";   break;
  case BuiltinType::LongDouble: EncType = "ld";  break;
  default:
    return false;
  }
  Enc += EncType;
  return true;
}
/// Appends a pointer encoding to Enc before calling appendType for the pointee.
static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT,
                              const CodeGen::CodeGenModule &CGM,
                              TypeStringCache &TSC) {
  Enc += "p(";
  if (!appendType(Enc, PT->getPointeeType(), CGM, TSC))
    return false;
  Enc += ')';
  return true;
}
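
// A hedged example: "int *" encodes as "p(si)"; qualifiers on the pointee are
// carried inside the parentheses, e.g. "const int *" gives "p(c:si)".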
/// Appends array encoding to Enc before calling appendType for the element.
static bool appendArrayType(SmallStringEnc &Enc, QualType QT,
                            const ArrayType *AT,
                            const CodeGen::CodeGenModule &CGM,
                            TypeStringCache &TSC, StringRef NoSizeEnc) {
  if (AT->getSizeModifier() != ArrayType::Normal)
    return false;
  Enc += "a(";
  if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT))
    CAT->getSize().toStringUnsigned(Enc);
  else
    Enc += NoSizeEnc; // Global arrays use "*", otherwise it is "".
  Enc += ':';
  // The Qualifiers should be attached to the type rather than the array.
  appendQualifier(Enc, QT);
  if (!appendType(Enc, AT->getElementType(), CGM, TSC))
    return false;
  Enc += ')';
  return true;
}
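
// A hedged example: "int A[10]" encodes as "a(10:si)", while an extern array
// of unknown size at global scope receives "a(*:si)" via NoSizeEnc == "*".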
/// Appends a function encoding to Enc, calling appendType for the return type
/// and the arguments.
static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT,
                               const CodeGen::CodeGenModule &CGM,
                               TypeStringCache &TSC) {
  Enc += "f{";
  if (!appendType(Enc, FT->getReturnType(), CGM, TSC))
    return false;
  Enc += "}(";
  if (const FunctionProtoType *FPT = FT->getAs<FunctionProtoType>()) {
    // N.B. we are only interested in the adjusted param types.
    auto I = FPT->param_type_begin();
    auto E = FPT->param_type_end();
    if (I != E) {
      do {
        if (!appendType(Enc, *I, CGM, TSC))
          return false;
        ++I;
        if (I != E)
          Enc += ',';
      } while (I != E);
      if (FPT->isVariadic())
        Enc += ",va";
    } else {
      if (FPT->isVariadic())
        Enc += "va";
      else
        Enc += '0';
    }
  }
  Enc += ')';
  return true;
}
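
// Hedged examples: "int f(int, ...)" encodes as "f{si}(si,va)";
// "int g(void)" as "f{si}(0)"; and an unprototyped "int h()" omits the
// parameter list entirely, giving "f{si}()".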
/// Handles the type's qualifier before dispatching a call to handle specific
/// type encodings.
static bool appendType(SmallStringEnc &Enc, QualType QType,
                       const CodeGen::CodeGenModule &CGM,
                       TypeStringCache &TSC) {
  QualType QT = QType.getCanonicalType();

  if (const ArrayType *AT = QT->getAsArrayTypeUnsafe())
    // The Qualifiers should be attached to the type rather than the array.
    // Thus we don't call appendQualifier() here.
    return appendArrayType(Enc, QT, AT, CGM, TSC, "");

  appendQualifier(Enc, QT);

  if (const BuiltinType *BT = QT->getAs<BuiltinType>())
    return appendBuiltinType(Enc, BT);

  if (const PointerType *PT = QT->getAs<PointerType>())
    return appendPointerType(Enc, PT, CGM, TSC);

  if (const EnumType *ET = QT->getAs<EnumType>())
    return appendEnumType(Enc, ET, TSC, QT.getBaseTypeIdentifier());

  if (const RecordType *RT = QT->getAsStructureType())
    return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());

  if (const RecordType *RT = QT->getAsUnionType())
    return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());

  if (const FunctionType *FT = QT->getAs<FunctionType>())
    return appendFunctionType(Enc, FT, CGM, TSC);

  return false;
}
static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
                          CodeGen::CodeGenModule &CGM, TypeStringCache &TSC) {
  if (!D)
    return false;

  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->getLanguageLinkage() != CLanguageLinkage)
      return false;
    return appendType(Enc, FD->getType(), CGM, TSC);
  }

  if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
    if (VD->getLanguageLinkage() != CLanguageLinkage)
      return false;
    QualType QT = VD->getType().getCanonicalType();
    if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) {
      // Global ArrayTypes are given a size of '*' if the size is unknown.
      // The Qualifiers should be attached to the type rather than the array.
      // Thus we don't call appendQualifier() here.
      return appendArrayType(Enc, QT, AT, CGM, TSC, "*");
    }
    return appendType(Enc, QT, CGM, TSC);
  }

  return false;
}
//===----------------------------------------------------------------------===//
// Driver code
//===----------------------------------------------------------------------===//
const llvm::Triple &CodeGenModule::getTriple() const {
  return getTarget().getTriple();
}

bool CodeGenModule::supportsCOMDAT() const {
  return !getTriple().isOSBinFormatMachO();
}
const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
  if (TheTargetCodeGenInfo)
    return *TheTargetCodeGenInfo;

  const llvm::Triple &Triple = getTarget().getTriple();
  switch (Triple.getArch()) {
  default:
    return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo(Types));

  case llvm::Triple::le32:
    return *(TheTargetCodeGenInfo = new PNaClTargetCodeGenInfo(Types));
  case llvm::Triple::mips:
  case llvm::Triple::mipsel:
    if (Triple.getOS() == llvm::Triple::NaCl)
      return *(TheTargetCodeGenInfo = new PNaClTargetCodeGenInfo(Types));
    return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, true));

  case llvm::Triple::mips64:
  case llvm::Triple::mips64el:
    return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, false));

  case llvm::Triple::aarch64:
  case llvm::Triple::aarch64_be: {
    AArch64ABIInfo::ABIKind Kind = AArch64ABIInfo::AAPCS;
    if (getTarget().getABI() == "darwinpcs")
      Kind = AArch64ABIInfo::DarwinPCS;

    return *(TheTargetCodeGenInfo = new AArch64TargetCodeGenInfo(Types, Kind));
  }

  case llvm::Triple::wasm32:
  case llvm::Triple::wasm64:
    return *(TheTargetCodeGenInfo = new WebAssemblyTargetCodeGenInfo(Types));
  case llvm::Triple::arm:
  case llvm::Triple::armeb:
  case llvm::Triple::thumb:
  case llvm::Triple::thumbeb:
    {
      if (Triple.getOS() == llvm::Triple::Win32) {
        TheTargetCodeGenInfo =
            new WindowsARMTargetCodeGenInfo(Types, ARMABIInfo::AAPCS_VFP);
        return *TheTargetCodeGenInfo;
      }

      ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
      StringRef ABIStr = getTarget().getABI();
      if (ABIStr == "apcs-gnu")
        Kind = ARMABIInfo::APCS;
      else if (ABIStr == "aapcs16")
        Kind = ARMABIInfo::AAPCS16_VFP;
      else if (CodeGenOpts.FloatABI == "hard" ||
               (CodeGenOpts.FloatABI != "soft" &&
                Triple.getEnvironment() == llvm::Triple::GNUEABIHF))
        Kind = ARMABIInfo::AAPCS_VFP;

      return *(TheTargetCodeGenInfo = new ARMTargetCodeGenInfo(Types, Kind));
    }
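
  // A hedged note on the ARM selection above: a hard-float environment in the
  // triple (e.g. gnueabihf) or an explicit FloatABI of "hard" selects
  // AAPCS_VFP, while APCS and AAPCS16_VFP are reached only via an explicit
  // target ABI string.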
  case llvm::Triple::ppc:
    return *(TheTargetCodeGenInfo =
                 new PPC32TargetCodeGenInfo(Types, CodeGenOpts.FloatABI == "soft"));
  case llvm::Triple::ppc64:
    if (Triple.isOSBinFormatELF()) {
      PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv1;
      if (getTarget().getABI() == "elfv2")
        Kind = PPC64_SVR4_ABIInfo::ELFv2;
      bool HasQPX = getTarget().getABI() == "elfv1-qpx";

      return *(TheTargetCodeGenInfo =
                   new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX));
    }
    return *(TheTargetCodeGenInfo = new PPC64TargetCodeGenInfo(Types));
  case llvm::Triple::ppc64le: {
    assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!");
    PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv2;
    if (getTarget().getABI() == "elfv1" || getTarget().getABI() == "elfv1-qpx")
      Kind = PPC64_SVR4_ABIInfo::ELFv1;
    bool HasQPX = getTarget().getABI() == "elfv1-qpx";

    return *(TheTargetCodeGenInfo =
                 new PPC64_SVR4_TargetCodeGenInfo(Types, Kind, HasQPX));
  }

  case llvm::Triple::nvptx:
  case llvm::Triple::nvptx64:
    return *(TheTargetCodeGenInfo = new NVPTXTargetCodeGenInfo(Types));

  case llvm::Triple::msp430:
    return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types));

  case llvm::Triple::systemz: {
    bool HasVector = getTarget().getABI() == "vector";
    return *(TheTargetCodeGenInfo = new SystemZTargetCodeGenInfo(Types,
                                                                 HasVector));
  }

  case llvm::Triple::tce:
    return *(TheTargetCodeGenInfo = new TCETargetCodeGenInfo(Types));

  case llvm::Triple::x86: {
    bool IsDarwinVectorABI = Triple.isOSDarwin();
    bool RetSmallStructInRegABI =
        X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
    bool IsWin32FloatStructABI = Triple.isOSWindows() && !Triple.isOSCygMing();

    if (Triple.getOS() == llvm::Triple::Win32) {
      return *(TheTargetCodeGenInfo = new WinX86_32TargetCodeGenInfo(
                   Types, IsDarwinVectorABI, RetSmallStructInRegABI,
                   IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters));
    } else {
      return *(TheTargetCodeGenInfo = new X86_32TargetCodeGenInfo(
                   Types, IsDarwinVectorABI, RetSmallStructInRegABI,
                   IsWin32FloatStructABI, CodeGenOpts.NumRegisterParameters,
                   CodeGenOpts.FloatABI == "soft"));
    }
  }

  case llvm::Triple::x86_64: {
    StringRef ABI = getTarget().getABI();
    X86AVXABILevel AVXLevel = (ABI == "avx512" ? X86AVXABILevel::AVX512 :
                               ABI == "avx"    ? X86AVXABILevel::AVX :
                                                 X86AVXABILevel::None);

    switch (Triple.getOS()) {
    case llvm::Triple::Win32:
      return *(TheTargetCodeGenInfo =
                   new WinX86_64TargetCodeGenInfo(Types, AVXLevel));
    case llvm::Triple::PS4:
      return *(TheTargetCodeGenInfo =
                   new PS4TargetCodeGenInfo(Types, AVXLevel));
    default:
      return *(TheTargetCodeGenInfo =
                   new X86_64TargetCodeGenInfo(Types, AVXLevel));
    }
  }
  case llvm::Triple::hexagon:
    return *(TheTargetCodeGenInfo = new HexagonTargetCodeGenInfo(Types));
  case llvm::Triple::r600:
    return *(TheTargetCodeGenInfo = new AMDGPUTargetCodeGenInfo(Types));
  case llvm::Triple::amdgcn:
    return *(TheTargetCodeGenInfo = new AMDGPUTargetCodeGenInfo(Types));
  case llvm::Triple::sparcv9:
    return *(TheTargetCodeGenInfo = new SparcV9TargetCodeGenInfo(Types));
  case llvm::Triple::xcore:
    return *(TheTargetCodeGenInfo = new XCoreTargetCodeGenInfo(Types));
  }
}