//===---- TargetInfo.cpp - Encapsulate target details -----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//
#include "TargetInfo.h"
#include "ABIInfo.h"
#include "CGCXXABI.h"
#include "CodeGenFunction.h"
#include "clang/AST/RecordLayout.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>    // std::sort

using namespace clang;
using namespace CodeGen;
static void AssignToArrayRange(CodeGen::CGBuilderTy &Builder,
                               llvm::Value *Array,
                               llvm::Value *Value,
                               unsigned FirstIndex,
                               unsigned LastIndex) {
  // Alternatively, we could emit this as a loop in the source.
  for (unsigned I = FirstIndex; I <= LastIndex; ++I) {
    llvm::Value *Cell = Builder.CreateConstInBoundsGEP1_32(Array, I);
    Builder.CreateStore(Value, Cell);
  }
}

static bool isAggregateTypeForABI(QualType T) {
  return !CodeGenFunction::hasScalarEvaluationKind(T) ||
         T->isMemberFunctionPointerType();
}

ABIInfo::~ABIInfo() {}
static CGCXXABI::RecordArgABI getRecordArgABI(const RecordType *RT,
                                              CGCXXABI &CXXABI) {
  const CXXRecordDecl *RD = dyn_cast<CXXRecordDecl>(RT->getDecl());
  if (!RD)
    return CGCXXABI::RAA_Default;
  return CXXABI.getRecordArgABI(RD);
}

static CGCXXABI::RecordArgABI getRecordArgABI(QualType T,
                                              CGCXXABI &CXXABI) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return CGCXXABI::RAA_Default;
  return getRecordArgABI(RT, CXXABI);
}
CGCXXABI &ABIInfo::getCXXABI() const {
  return CGT.getCXXABI();
}

ASTContext &ABIInfo::getContext() const {
  return CGT.getContext();
}

llvm::LLVMContext &ABIInfo::getVMContext() const {
  return CGT.getLLVMContext();
}

const llvm::DataLayout &ABIInfo::getDataLayout() const {
  return CGT.getDataLayout();
}

const TargetInfo &ABIInfo::getTarget() const {
  return CGT.getTarget();
}
void ABIArgInfo::dump() const {
  raw_ostream &OS = llvm::errs();
  OS << "(ABIArgInfo Kind=";
  switch (TheKind) {
  case Direct:
    OS << "Direct Type=";
    if (llvm::Type *Ty = getCoerceToType())
      Ty->print(OS);
    else
      OS << "null";
    break;
  case Extend:
    OS << "Extend";
    break;
  case Ignore:
    OS << "Ignore";
    break;
  case InAlloca:
    OS << "InAlloca Offset=" << getInAllocaFieldIndex();
    break;
  case Indirect:
    OS << "Indirect Align=" << getIndirectAlign()
       << " ByVal=" << getIndirectByVal()
       << " Realign=" << getIndirectRealign();
    break;
  case Expand:
    OS << "Expand";
    break;
  }
  OS << ")\n";
}
TargetCodeGenInfo::~TargetCodeGenInfo() { delete Info; }

// If someone can figure out a general rule for this, that would be great.
// It's probably just doomed to be platform-dependent, though.
unsigned TargetCodeGenInfo::getSizeOfUnwindException() const {
  // Verified for:
  //   x86-64     FreeBSD, Linux, Darwin
  //   x86-32     FreeBSD, Linux, Darwin
  //   PowerPC    Linux, Darwin
  //   ARM        Darwin (*not* EABI)
  return 32;
}

bool TargetCodeGenInfo::isNoProtoCallVariadic(const CallArgList &args,
                                     const FunctionNoProtoType *fnType) const {
  // The following conventions are known to require this to be false:
  //   x86_stdcall
  //   MIPS
  // For everything else, we just prefer false unless we opt out.
  return false;
}
void
TargetCodeGenInfo::getDependentLibraryOption(llvm::StringRef Lib,
                                             llvm::SmallString<24> &Opt) const {
  // This assumes the user is passing a library name like "rt" instead of a
  // filename like "librt.a/so", and that they don't care whether it's static or
  // dynamic.
  Opt = "-l";
  Opt += Lib;
}

static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays);
/// isEmptyField - Return true iff the field is "empty", that is, it
/// is an unnamed bit-field or an (array of) empty record(s).
static bool isEmptyField(ASTContext &Context, const FieldDecl *FD,
                         bool AllowArrays) {
  if (FD->isUnnamedBitfield())
    return true;

  QualType FT = FD->getType();

  // Constant arrays of empty records count as empty, strip them off.
  // Constant arrays of zero length always count as empty.
  if (AllowArrays)
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize() == 0)
        return true;
      FT = AT->getElementType();
    }

  const RecordType *RT = FT->getAs<RecordType>();
  if (!RT)
    return false;

  // C++ record fields are never empty, at least in the Itanium ABI.
  //
  // FIXME: We should use a predicate for whether this behavior is true in the
  // current ABI.
  if (isa<CXXRecordDecl>(RT->getDecl()))
    return false;

  return isEmptyRecord(Context, FT, AllowArrays);
}
/// isEmptyRecord - Return true iff a structure contains only empty
/// fields. Note that a structure with a flexible array member is not
/// considered empty.
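///
/// For example (in C), "struct { int : 0; }" and "struct { struct {} s; }"
/// both count as empty records.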
static bool isEmptyRecord(ASTContext &Context, QualType T, bool AllowArrays) {
  const RecordType *RT = T->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())
      if (!isEmptyRecord(Context, I.getType(), true))
        return false;

  for (const auto *I : RD->fields())
    if (!isEmptyField(Context, I, AllowArrays))
      return false;
  return true;
}
/// isSingleElementStruct - Determine if a structure is a "single
/// element struct", i.e. it has exactly one non-empty field or
/// exactly one field which is itself a single element
/// struct. Structures with flexible array members are never
/// considered single element structs.
///
/// \return The field declaration for the single non-empty field, if
/// it exists.
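///
/// For example, "struct { struct { double d; } inner; }" is a single element
/// struct whose element type is 'double'.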
static const Type *isSingleElementStruct(QualType T, ASTContext &Context) {
  const RecordType *RT = T->getAsStructureType();
  if (!RT)
    return nullptr;

  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return nullptr;

  const Type *Found = nullptr;

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
    for (const auto &I : CXXRD->bases()) {
      // Ignore empty records.
      if (isEmptyRecord(Context, I.getType(), true))
        continue;

      // If we already found an element then this isn't a single-element struct.
      if (Found)
        return nullptr;

      // If this is non-empty and not a single element struct, the composite
      // cannot be a single element struct.
      Found = isSingleElementStruct(I.getType(), Context);
      if (!Found)
        return nullptr;
    }
  }

  // Check for single element.
  for (const auto *FD : RD->fields()) {
    QualType FT = FD->getType();

    // Ignore empty fields.
    if (isEmptyField(Context, FD, true))
      continue;

    // If we already found an element then this isn't a single-element
    // struct.
    if (Found)
      return nullptr;

    // Treat single element arrays as the element.
    while (const ConstantArrayType *AT = Context.getAsConstantArrayType(FT)) {
      if (AT->getSize().getZExtValue() != 1)
        break;
      FT = AT->getElementType();
    }

    if (!isAggregateTypeForABI(FT)) {
      Found = FT.getTypePtr();
    } else {
      Found = isSingleElementStruct(FT, Context);
      if (!Found)
        return nullptr;
    }
  }

  // We don't consider a struct a single-element struct if it has
  // padding beyond the element type.
  if (Found && Context.getTypeSize(Found) != Context.getTypeSize(T))
    return nullptr;

  return Found;
}
static bool is32Or64BitBasicType(QualType Ty, ASTContext &Context) {
  // Treat complex types as the element type.
  if (const ComplexType *CTy = Ty->getAs<ComplexType>())
    Ty = CTy->getElementType();

  // Check for a type which we know has a simple scalar argument-passing
  // convention without any padding. (We're specifically looking for 32-
  // and 64-bit integer and integer-equivalents, float, and double.)
  if (!Ty->getAs<BuiltinType>() && !Ty->hasPointerRepresentation() &&
      !Ty->isEnumeralType() && !Ty->isBlockPointerType())
    return false;

  uint64_t Size = Context.getTypeSize(Ty);
  return Size == 32 || Size == 64;
}
/// canExpandIndirectArgument - Test whether an argument type which is to be
/// passed indirectly (on the stack) would have the equivalent layout if it was
/// expanded into separate arguments. If so, we prefer to do the latter to
/// avoid inhibiting optimizations.
///
// FIXME: This predicate is missing many cases, currently it just follows
// llvm-gcc (checks that all fields are 32-bit or 64-bit primitive types). We
// should probably make this smarter, or better yet make the LLVM backend
// capable of handling it.
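//
// For example, "struct { int x; int y; }" has the same stack layout whether
// it is passed as one struct or expanded into two i32 arguments, so it can
// be expanded; "struct { char c; }" cannot, since char is not a 32- or
// 64-bit basic type.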
static bool canExpandIndirectArgument(QualType Ty, ASTContext &Context) {
  // We can only expand structure types.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;

  // We can only expand (C) structures.
  //
  // FIXME: This needs to be generalized to handle classes as well.
  const RecordDecl *RD = RT->getDecl();
  if (!RD->isStruct() || isa<CXXRecordDecl>(RD))
    return false;

  uint64_t Size = 0;

  for (const auto *FD : RD->fields()) {
    if (!is32Or64BitBasicType(FD->getType(), Context))
      return false;

    // FIXME: Reject bit-fields wholesale; there are two problems, we don't
    // know how to expand them yet, and the predicate for telling if a
    // bit-field still counts as "basic" is more complicated than what we were
    // doing previously.
    if (FD->isBitField())
      return false;

    Size += Context.getTypeSize(FD->getType());
  }

  // Make sure there are not any holes in the struct.
  if (Size != Context.getTypeSize(Ty))
    return false;

  return true;
}
/// DefaultABIInfo - The default implementation for ABI specific
/// details. This implementation provides information which results in
/// self-consistent and sensible LLVM IR generation, but does not
/// conform to any particular ABI.
class DefaultABIInfo : public ABIInfo {
public:
  DefaultABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type);
  }

  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};

class DefaultTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  DefaultTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
};

llvm::Value *DefaultABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  return nullptr;
}
ABIArgInfo DefaultABIInfo::classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty))
    return ABIArgInfo::getIndirect(0);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo DefaultABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (isAggregateTypeForABI(RetTy))
    return ABIArgInfo::getIndirect(0);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
//===----------------------------------------------------------------------===//
// le32/PNaCl bitcode ABI Implementation
//
// This is a simplified version of the x86_32 ABI. Arguments and return values
// are always passed on the stack.
//===----------------------------------------------------------------------===//

class PNaClABIInfo : public ABIInfo {
public:
  PNaClABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType RetTy) const;

  void computeInfo(CGFunctionInfo &FI) const override;
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};

class PNaClTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  PNaClTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new PNaClABIInfo(CGT)) {}
};

void PNaClABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());

  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type);
}

llvm::Value *PNaClABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                     CodeGenFunction &CGF) const {
  return nullptr;
}
/// \brief Classify argument of given type \p Ty.
ABIArgInfo PNaClABIInfo::classifyArgumentType(QualType Ty) const {
  if (isAggregateTypeForABI(Ty)) {
    if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
      return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
    return ABIArgInfo::getIndirect(0);
  } else if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
    // Treat an enum type as its underlying type.
    Ty = EnumTy->getDecl()->getIntegerType();
  } else if (Ty->isFloatingType()) {
    // Floating-point types don't go inreg.
    return ABIArgInfo::getDirect();
  }

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo PNaClABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // In the PNaCl ABI we always return records/structures on the stack.
  if (isAggregateTypeForABI(RetTy))
    return ABIArgInfo::getIndirect(0);

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
/// IsX86_MMXType - Return true if this is an MMX type.
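/// (That is, a 64-bit vector of i8, i16, or i32 elements; <1 x i64> is
/// excluded because its scalar size is 64 bits.)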
bool IsX86_MMXType(llvm::Type *IRType) {
  // Return true if the type is an MMX type <2 x i32>, <4 x i16>, or <8 x i8>.
  return IRType->isVectorTy() && IRType->getPrimitiveSizeInBits() == 64 &&
    cast<llvm::VectorType>(IRType)->getElementType()->isIntegerTy() &&
    IRType->getScalarSizeInBits() != 64;
}

static llvm::Type* X86AdjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                          StringRef Constraint,
                                          llvm::Type* Ty) {
  if ((Constraint == "y" || Constraint == "&y") && Ty->isVectorTy()) {
    if (cast<llvm::VectorType>(Ty)->getBitWidth() != 64) {
      // Invalid MMX constraint
      return nullptr;
    }

    return llvm::Type::getX86_MMXTy(CGF.getLLVMContext());
  }

  // No operation needed
  return Ty;
}
//===----------------------------------------------------------------------===//
// X86-32 ABI Implementation
//===----------------------------------------------------------------------===//

/// \brief Similar to llvm::CCState, but for Clang.
struct CCState {
  CCState(unsigned CC) : CC(CC), FreeRegs(0) {}

  unsigned CC;
  unsigned FreeRegs;
  unsigned StackOffset;
};

/// X86_32ABIInfo - The X86-32 ABI information.
class X86_32ABIInfo : public ABIInfo {
  enum Class {
    Integer,
    Float
  };

  static const unsigned MinABIStackAlignInBytes = 4;

  bool IsDarwinVectorABI;
  bool IsSmallStructInRegABI;
  bool IsWin32StructABI;
  unsigned DefaultNumRegisterParameters;

  static bool isRegisterSize(unsigned Size) {
    return (Size == 8 || Size == 16 || Size == 32 || Size == 64);
  }

  bool shouldReturnTypeInRegister(QualType Ty, ASTContext &Context) const;

  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ABIArgInfo getIndirectResult(QualType Ty, bool ByVal, CCState &State) const;

  ABIArgInfo getIndirectReturnResult(CCState &State) const;

  /// \brief Return the alignment to use for the given type on the stack.
  unsigned getTypeStackAlignInBytes(QualType Ty, unsigned Align) const;

  Class classify(QualType Ty) const;
  ABIArgInfo classifyReturnType(QualType RetTy, CCState &State) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, CCState &State) const;
  bool shouldUseInReg(QualType Ty, CCState &State, bool &NeedsPadding) const;

  /// \brief Rewrite the function info so that all memory arguments use
  /// inalloca.
  void rewriteWithInAlloca(CGFunctionInfo &FI) const;

  void addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
                           unsigned &StackOffset, ABIArgInfo &Info,
                           QualType Type) const;

public:

  void computeInfo(CGFunctionInfo &FI) const override;
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;

  X86_32ABIInfo(CodeGen::CodeGenTypes &CGT, bool d, bool p, bool w,
                unsigned r)
    : ABIInfo(CGT), IsDarwinVectorABI(d), IsSmallStructInRegABI(p),
      IsWin32StructABI(w), DefaultNumRegisterParameters(r) {}
};
class X86_32TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
                          bool d, bool p, bool w, unsigned r)
    : TargetCodeGenInfo(new X86_32ABIInfo(CGT, d, p, w, r)) {}

  static bool isStructReturnInRegABI(
      const llvm::Triple &Triple, const CodeGenOptions &Opts);

  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override;

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    // Darwin uses different dwarf register numbers for EH.
    if (CGM.getTarget().getTriple().isOSDarwin()) return 5;
    return 4;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  llvm::Constant *
  getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
    unsigned Sig = (0xeb << 0) |  // jmp rel8
                   (0x06 << 8) |  //           .+0x08
                   ('F' << 16) |
                   ('T' << 24);
    return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
  }
};
/// shouldReturnTypeInRegister - Determine if the given type should be
/// passed in a register (for the Darwin ABI).
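/// For example, "struct { short a, b; }" is 32 bits wide and every field is
/// itself returnable in a register, so on Darwin it is returned in a
/// register.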
bool X86_32ABIInfo::shouldReturnTypeInRegister(QualType Ty,
                                               ASTContext &Context) const {
  uint64_t Size = Context.getTypeSize(Ty);

  // Type must be register sized.
  if (!isRegisterSize(Size))
    return false;

  if (Ty->isVectorType()) {
    // 64- and 128-bit vectors inside structures are not returned in
    // registers.
    if (Size == 64 || Size == 128)
      return false;

    return true;
  }

  // If this is a builtin, pointer, enum, complex type, member pointer, or
  // member function pointer it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->hasPointerRepresentation() ||
      Ty->isAnyComplexType() || Ty->isEnumeralType() ||
      Ty->isBlockPointerType() || Ty->isMemberPointerType())
    return true;

  // Arrays are treated like records.
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty))
    return shouldReturnTypeInRegister(AT->getElementType(), Context);

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // FIXME: Traverse bases here too.

  // Structure types are passed in register if all fields would be
  // passed in a register.
  for (const auto *FD : RT->getDecl()->fields()) {
    // Empty fields are ignored.
    if (isEmptyField(Context, FD, true))
      continue;

    // Check fields recursively.
    if (!shouldReturnTypeInRegister(FD->getType(), Context))
      return false;
  }
  return true;
}
ABIArgInfo X86_32ABIInfo::getIndirectReturnResult(CCState &State) const {
  // If the return value is indirect, then the hidden argument is consuming one
  // integer register.
  if (State.FreeRegs) {
    --State.FreeRegs;
    return ABIArgInfo::getIndirectInReg(/*Align=*/0, /*ByVal=*/false);
  }
  return ABIArgInfo::getIndirect(/*Align=*/0, /*ByVal=*/false);
}
ABIArgInfo X86_32ABIInfo::classifyReturnType(QualType RetTy,
                                             CCState &State) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  if (const VectorType *VT = RetTy->getAs<VectorType>()) {
    // On Darwin, some vectors are returned in registers.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // 128-bit vectors are a special case; they are returned in
      // registers and we need to make sure to pick a type the LLVM
      // backend will like.
      if (Size == 128)
        return ABIArgInfo::getDirect(llvm::VectorType::get(
                  llvm::Type::getInt64Ty(getVMContext()), 2));

      // Always return in register if it fits in a general purpose
      // register, or if it is 64 bits and has a single element.
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));

      return getIndirectReturnResult(State);
    }

    return ABIArgInfo::getDirect();
  }

  if (isAggregateTypeForABI(RetTy)) {
    if (const RecordType *RT = RetTy->getAs<RecordType>()) {
      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectReturnResult(State);
    }

    // If specified, structs and unions are always indirect.
    if (!IsSmallStructInRegABI && !RetTy->isAnyComplexType())
      return getIndirectReturnResult(State);

    // Small structures which are register sized are generally returned
    // in a register.
    if (shouldReturnTypeInRegister(RetTy, getContext())) {
      uint64_t Size = getContext().getTypeSize(RetTy);

      // As a special-case, if the struct is a "single-element" struct, and
      // the field is of type "float" or "double", return it in a
      // floating-point register. (MSVC does not apply this special case.)
      // We apply a similar transformation for pointer types to improve the
      // quality of the generated IR.
      if (const Type *SeltTy = isSingleElementStruct(RetTy, getContext()))
        if ((!IsWin32StructABI && SeltTy->isRealFloatingType())
            || SeltTy->hasPointerRepresentation())
          return ABIArgInfo::getDirect(CGT.ConvertType(QualType(SeltTy, 0)));

      // FIXME: We should be able to narrow this integer in cases with dead
      // padding.
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),Size));
    }

    return getIndirectReturnResult(State);
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
static bool isSSEVectorType(ASTContext &Context, QualType Ty) {
  return Ty->getAs<VectorType>() && Context.getTypeSize(Ty) == 128;
}

static bool isRecordWithSSEVectorType(ASTContext &Context, QualType Ty) {
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT)
    return false;
  const RecordDecl *RD = RT->getDecl();

  // If this is a C++ record, check the bases first.
  if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
    for (const auto &I : CXXRD->bases())
      if (!isRecordWithSSEVectorType(Context, I.getType()))
        return false;

  for (const auto *i : RD->fields()) {
    QualType FT = i->getType();

    if (isSSEVectorType(Context, FT))
      return true;

    if (isRecordWithSSEVectorType(Context, FT))
      return true;
  }

  return false;
}
unsigned X86_32ABIInfo::getTypeStackAlignInBytes(QualType Ty,
                                                 unsigned Align) const {
  // Otherwise, if the alignment is less than or equal to the minimum ABI
  // alignment, just use the default; the backend will handle this.
  if (Align <= MinABIStackAlignInBytes)
    return 0; // Use default alignment.

  // On non-Darwin, the stack type alignment is always 4.
  if (!IsDarwinVectorABI) {
    // Set explicit alignment, since we may need to realign the top.
    return MinABIStackAlignInBytes;
  }

  // Otherwise, if the type contains an SSE vector type, the alignment is 16.
  if (Align >= 16 && (isSSEVectorType(getContext(), Ty) ||
                      isRecordWithSSEVectorType(getContext(), Ty)))
    return 16;

  return MinABIStackAlignInBytes;
}
ABIArgInfo X86_32ABIInfo::getIndirectResult(QualType Ty, bool ByVal,
                                            CCState &State) const {
  if (!ByVal) {
    if (State.FreeRegs) {
      --State.FreeRegs; // Non-byval indirects just use one pointer.
      return ABIArgInfo::getIndirectInReg(0, false);
    }
    return ABIArgInfo::getIndirect(0, false);
  }

  // Compute the byval alignment.
  unsigned TypeAlign = getContext().getTypeAlign(Ty) / 8;
  unsigned StackAlign = getTypeStackAlignInBytes(Ty, TypeAlign);
  if (StackAlign == 0)
    return ABIArgInfo::getIndirect(4, /*ByVal=*/true);

  // If the stack alignment is less than the type alignment, realign the
  // argument.
  bool Realign = TypeAlign > StackAlign;
  return ABIArgInfo::getIndirect(StackAlign, /*ByVal=*/true, Realign);
}
X86_32ABIInfo::Class X86_32ABIInfo::classify(QualType Ty) const {
  const Type *T = isSingleElementStruct(Ty, getContext());
  if (!T)
    T = Ty.getTypePtr();

  if (const BuiltinType *BT = T->getAs<BuiltinType>()) {
    BuiltinType::Kind K = BT->getKind();
    if (K == BuiltinType::Float || K == BuiltinType::Double)
      return Float;
  }
  return Integer;
}
bool X86_32ABIInfo::shouldUseInReg(QualType Ty, CCState &State,
                                   bool &NeedsPadding) const {
  NeedsPadding = false;
  Class C = classify(Ty);
  if (C == Float)
    return false;

  unsigned Size = getContext().getTypeSize(Ty);
  unsigned SizeInRegs = (Size + 31) / 32;

  if (SizeInRegs == 0)
    return false;

  if (SizeInRegs > State.FreeRegs) {
    State.FreeRegs = 0;
    return false;
  }

  State.FreeRegs -= SizeInRegs;

  if (State.CC == llvm::CallingConv::X86_FastCall) {
    if (Size > 32)
      return false;

    if (Ty->isIntegralOrEnumerationType())
      return true;

    if (Ty->isPointerType())
      return true;

    if (Ty->isReferenceType())
      return true;

    if (State.FreeRegs)
      NeedsPadding = true;

    return false;
  }

  return true;
}
ABIArgInfo X86_32ABIInfo::classifyArgumentType(QualType Ty,
                                               CCState &State) const {
  // FIXME: Set alignment on indirect arguments.
  if (isAggregateTypeForABI(Ty)) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      // Check with the C++ ABI first.
      CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI());
      if (RAA == CGCXXABI::RAA_Indirect) {
        return getIndirectResult(Ty, false, State);
      } else if (RAA == CGCXXABI::RAA_DirectInMemory) {
        // The field index doesn't matter, we'll fix it up later.
        return ABIArgInfo::getInAlloca(/*FieldIndex=*/0);
      }

      // Structs are always byval on win32, regardless of what they contain.
      if (IsWin32StructABI)
        return getIndirectResult(Ty, true, State);

      // Structures with flexible arrays are always indirect.
      if (RT->getDecl()->hasFlexibleArrayMember())
        return getIndirectResult(Ty, true, State);
    }

    // Ignore empty structs/unions.
    if (isEmptyRecord(getContext(), Ty, true))
      return ABIArgInfo::getIgnore();

    llvm::LLVMContext &LLVMContext = getVMContext();
    llvm::IntegerType *Int32 = llvm::Type::getInt32Ty(LLVMContext);
    bool NeedsPadding;
    if (shouldUseInReg(Ty, State, NeedsPadding)) {
      unsigned SizeInRegs = (getContext().getTypeSize(Ty) + 31) / 32;
      SmallVector<llvm::Type*, 3> Elements(SizeInRegs, Int32);
      llvm::Type *Result = llvm::StructType::get(LLVMContext, Elements);
      return ABIArgInfo::getDirectInReg(Result);
    }
    llvm::IntegerType *PaddingType = NeedsPadding ? Int32 : nullptr;

    // Expand small (<= 128-bit) record types when we know that the stack layout
    // of those arguments will match the struct. This is important because the
    // LLVM backend isn't smart enough to remove byval, which inhibits many
    // optimizations.
    if (getContext().getTypeSize(Ty) <= 4*32 &&
        canExpandIndirectArgument(Ty, getContext()))
      return ABIArgInfo::getExpandWithPadding(
          State.CC == llvm::CallingConv::X86_FastCall, PaddingType);

    return getIndirectResult(Ty, true, State);
  }

  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // On Darwin, some vectors are passed in memory, we handle this by passing
    // it as an i8/i16/i32/i64.
    if (IsDarwinVectorABI) {
      uint64_t Size = getContext().getTypeSize(Ty);
      if ((Size == 8 || Size == 16 || Size == 32) ||
          (Size == 64 && VT->getNumElements() == 1))
        return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                            Size));
    }

    if (IsX86_MMXType(CGT.ConvertType(Ty)))
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), 64));

    return ABIArgInfo::getDirect();
  }

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  bool NeedsPadding;
  bool InReg = shouldUseInReg(Ty, State, NeedsPadding);

  if (Ty->isPromotableIntegerType()) {
    if (InReg)
      return ABIArgInfo::getExtendInReg();
    return ABIArgInfo::getExtend();
  }

  if (InReg)
    return ABIArgInfo::getDirectInReg();
  return ABIArgInfo::getDirect();
}
void X86_32ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  CCState State(FI.getCallingConvention());
  if (State.CC == llvm::CallingConv::X86_FastCall)
    State.FreeRegs = 2;
  else if (FI.getHasRegParm())
    State.FreeRegs = FI.getRegParm();
  else
    State.FreeRegs = DefaultNumRegisterParameters;

  if (!getCXXABI().classifyReturnType(FI)) {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(), State);
  } else if (FI.getReturnInfo().isIndirect()) {
    // The C++ ABI is not aware of register usage, so we have to check if the
    // return value was sret and put it in a register ourselves if appropriate.
    if (State.FreeRegs) {
      --State.FreeRegs;  // The sret parameter consumes a register.
      FI.getReturnInfo().setInReg(true);
    }
  }

  bool UsedInAlloca = false;
  for (auto &I : FI.arguments()) {
    I.info = classifyArgumentType(I.type, State);
    UsedInAlloca |= (I.info.getKind() == ABIArgInfo::InAlloca);
  }

  // If we needed to use inalloca for any argument, do a second pass and rewrite
  // all the memory arguments to use inalloca.
  if (UsedInAlloca)
    rewriteWithInAlloca(FI);
}
void
X86_32ABIInfo::addFieldToArgStruct(SmallVector<llvm::Type *, 6> &FrameFields,
                                   unsigned &StackOffset,
                                   ABIArgInfo &Info, QualType Type) const {
  assert(StackOffset % 4U == 0 && "unaligned inalloca struct");
  Info = ABIArgInfo::getInAlloca(FrameFields.size());
  FrameFields.push_back(CGT.ConvertTypeForMem(Type));
  StackOffset += getContext().getTypeSizeInChars(Type).getQuantity();

  // Insert padding bytes to respect alignment. For x86_32, each argument is 4
  // byte aligned.
  if (StackOffset % 4U) {
    unsigned OldOffset = StackOffset;
    StackOffset = llvm::RoundUpToAlignment(StackOffset, 4U);
    unsigned NumBytes = StackOffset - OldOffset;

    llvm::Type *Ty = llvm::Type::getInt8Ty(getVMContext());
    Ty = llvm::ArrayType::get(Ty, NumBytes);
    FrameFields.push_back(Ty);
  }
}
void X86_32ABIInfo::rewriteWithInAlloca(CGFunctionInfo &FI) const {
  assert(IsWin32StructABI && "inalloca only supported on win32");

  // Build a packed struct type for all of the arguments in memory.
  SmallVector<llvm::Type *, 6> FrameFields;

  unsigned StackOffset = 0;

  // Put the sret parameter into the inalloca struct if it's in memory.
  ABIArgInfo &Ret = FI.getReturnInfo();
  if (Ret.isIndirect() && !Ret.getInReg()) {
    CanQualType PtrTy = getContext().getPointerType(FI.getReturnType());
    addFieldToArgStruct(FrameFields, StackOffset, Ret, PtrTy);
    // On Windows, the hidden sret parameter is always returned in eax.
    Ret.setInAllocaSRet(IsWin32StructABI);
  }

  // Skip the 'this' parameter in ecx.
  CGFunctionInfo::arg_iterator I = FI.arg_begin(), E = FI.arg_end();
  if (FI.getCallingConvention() == llvm::CallingConv::X86_ThisCall)
    ++I;

  // Put arguments passed in memory into the struct.
  for (; I != E; ++I) {

    // Leave ignored and inreg arguments alone.
    switch (I->info.getKind()) {
    case ABIArgInfo::Indirect:
      assert(I->info.getIndirectByVal());
      break;
    case ABIArgInfo::Ignore:
      continue;
    case ABIArgInfo::Direct:
    case ABIArgInfo::Extend:
      if (I->info.getInReg())
        continue;
      break;
    default:
      break;
    }

    addFieldToArgStruct(FrameFields, StackOffset, I->info, I->type);
  }

  FI.setArgStruct(llvm::StructType::get(getVMContext(), FrameFields,
                                        /*isPacked=*/true));
}
llvm::Value *X86_32ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                      CodeGenFunction &CGF) const {
  llvm::Type *BPP = CGF.Int8PtrPtrTy;

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");

  // Compute if the address needs to be aligned
  unsigned Align = CGF.getContext().getTypeAlignInChars(Ty).getQuantity();
  Align = getTypeStackAlignInBytes(Ty, Align);
  Align = std::max(Align, 4U);
  if (Align > 4) {
    // addr = (addr + align - 1) & -align;
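    // (For example, with Align == 8 an ap.cur of 0x1004 rounds up to 0x1008.)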
    llvm::Value *Offset =
      llvm::ConstantInt::get(CGF.Int32Ty, Align - 1);
    Addr = CGF.Builder.CreateGEP(Addr, Offset);
    llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(Addr,
                                                    CGF.Int32Ty);
    llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int32Ty, -Align);
    Addr = CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
                                      Addr->getType(),
                                      "ap.cur.aligned");
  }

  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, Align);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}
bool X86_32TargetCodeGenInfo::isStructReturnInRegABI(
    const llvm::Triple &Triple, const CodeGenOptions &Opts) {
  assert(Triple.getArch() == llvm::Triple::x86);

  switch (Opts.getStructReturnConvention()) {
  case CodeGenOptions::SRCK_Default:
    break;
  case CodeGenOptions::SRCK_OnStack:  // -fpcc-struct-return
    return false;
  case CodeGenOptions::SRCK_InRegs:  // -freg-struct-return
    return true;
  }

  if (Triple.isOSDarwin())
    return true;

  switch (Triple.getOS()) {
  case llvm::Triple::AuroraUX:
  case llvm::Triple::DragonFly:
  case llvm::Triple::FreeBSD:
  case llvm::Triple::OpenBSD:
  case llvm::Triple::Bitrig:
    return true;
  case llvm::Triple::Win32:
    switch (Triple.getEnvironment()) {
    case llvm::Triple::UnknownEnvironment:
    case llvm::Triple::Cygnus:
    case llvm::Triple::GNU:
    case llvm::Triple::MSVC:
      return true;
    default:
      return false;
    }
  default:
    return false;
  }
}
void X86_32TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
                                                  llvm::GlobalValue *GV,
                                            CodeGen::CodeGenModule &CGM) const {
  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->hasAttr<X86ForceAlignArgPointerAttr>()) {
      // Get the LLVM function.
      llvm::Function *Fn = cast<llvm::Function>(GV);

      // Now add the 'alignstack' attribute with a value of 16.
      llvm::AttrBuilder B;
      B.addStackAlignmentAttr(16);
      Fn->addAttributes(llvm::AttributeSet::FunctionIndex,
                        llvm::AttributeSet::get(CGM.getLLVMContext(),
                                               llvm::AttributeSet::FunctionIndex,
                                               B));
    }
  }
}
bool X86_32TargetCodeGenInfo::initDwarfEHRegSizeTable(
                                               CodeGen::CodeGenFunction &CGF,
                                               llvm::Value *Address) const {
  CodeGen::CGBuilderTy &Builder = CGF.Builder;

  llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

  // 0-7 are the eight integer registers; the order is different
  //   on Darwin (for EH), but the range is the same.
  // 8 is %eip.
  AssignToArrayRange(Builder, Address, Four8, 0, 8);

  if (CGF.CGM.getTarget().getTriple().isOSDarwin()) {
    // 12-16 are st(0..4). Not sure why we stop at 4.
    // These have size 16, which is sizeof(long double) on
    // platforms with 8-byte alignment for that type.
    llvm::Value *Sixteen8 = llvm::ConstantInt::get(CGF.Int8Ty, 16);
    AssignToArrayRange(Builder, Address, Sixteen8, 12, 16);
  } else {
    // 9 is %eflags, which doesn't get a size on Darwin for some
    // reason.
    Builder.CreateStore(Four8, Builder.CreateConstInBoundsGEP1_32(Address, 9));

    // 11-16 are st(0..5). Not sure why we stop at 5.
    // These have size 12, which is sizeof(long double) on
    // platforms with 4-byte alignment for that type.
    llvm::Value *Twelve8 = llvm::ConstantInt::get(CGF.Int8Ty, 12);
    AssignToArrayRange(Builder, Address, Twelve8, 11, 16);
  }

  return false;
}
//===----------------------------------------------------------------------===//
// X86-64 ABI Implementation
//===----------------------------------------------------------------------===//
/// X86_64ABIInfo - The X86_64 ABI information.
class X86_64ABIInfo : public ABIInfo {
  enum Class {
    Integer = 0,
    SSE,
    SSEUp,
    X87,
    X87Up,
    ComplexX87,
    NoClass,
    Memory
  };

  /// merge - Implement the X86_64 ABI merging algorithm.
  ///
  /// Merge an accumulating classification \arg Accum with a field
  /// classification \arg Field.
  ///
  /// \param Accum - The accumulating classification. This should
  /// always be either NoClass or the result of a previous merge
  /// call. In addition, this should never be Memory (the caller
  /// should just return Memory for the aggregate).
  static Class merge(Class Accum, Class Field);

  /// postMerge - Implement the X86_64 ABI post merging algorithm.
  ///
  /// Post merger cleanup, reduces a malformed Hi and Lo pair to
  /// final MEMORY or SSE classes when necessary.
  ///
  /// \param AggregateSize - The size of the current aggregate in
  /// the classification process.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the higher words of the containing object.
  void postMerge(unsigned AggregateSize, Class &Lo, Class &Hi) const;

  /// classify - Determine the x86_64 register classes in which the
  /// given type T should be passed.
  ///
  /// \param Lo - The classification for the parts of the type
  /// residing in the low word of the containing object.
  ///
  /// \param Hi - The classification for the parts of the type
  /// residing in the high word of the containing object.
  ///
  /// \param OffsetBase - The bit offset of this type in the
  /// containing object. Some parameters are classified different
  /// depending on whether they straddle an eightbyte boundary.
  ///
  /// \param isNamedArg - Whether the argument in question is a "named"
  /// argument, as used in AMD64-ABI 3.5.7.
  ///
  /// If a word is unused its result will be NoClass; if a type should
  /// be passed in Memory then at least the classification of \arg Lo
  /// will be Memory.
  ///
  /// The \arg Lo class will be NoClass iff the argument is ignored.
  ///
  /// If the \arg Lo class is ComplexX87, then the \arg Hi class will
  /// also be ComplexX87.
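  ///
  /// For example, classifying "struct { long l; double d; }" yields
  /// Lo == Integer and Hi == SSE: the struct is passed in one general
  /// purpose register and one SSE register.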
  void classify(QualType T, uint64_t OffsetBase, Class &Lo, Class &Hi,
                bool isNamedArg) const;

  llvm::Type *GetByteVectorType(QualType Ty) const;
  llvm::Type *GetSSETypeAtOffset(llvm::Type *IRType,
                                 unsigned IROffset, QualType SourceTy,
                                 unsigned SourceOffset) const;
  llvm::Type *GetINTEGERTypeAtOffset(llvm::Type *IRType,
                                     unsigned IROffset, QualType SourceTy,
                                     unsigned SourceOffset) const;

  /// getIndirectReturnResult - Given a source type \arg Ty, return a suitable
  /// result such that the argument will be returned in memory.
  ABIArgInfo getIndirectReturnResult(QualType Ty) const;

  /// getIndirectResult - Given a source type \arg Ty, return a suitable result
  /// such that the argument will be passed in memory.
  ///
  /// \param freeIntRegs - The number of free integer registers remaining
  /// available.
  ABIArgInfo getIndirectResult(QualType Ty, unsigned freeIntRegs) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;

  ABIArgInfo classifyArgumentType(QualType Ty,
                                  unsigned freeIntRegs,
                                  unsigned &neededInt,
                                  unsigned &neededSSE,
                                  bool isNamedArg) const;

  bool IsIllegalVectorType(QualType Ty) const;

  /// The 0.98 ABI revision clarified a lot of ambiguities,
  /// unfortunately in ways that were not always consistent with
  /// certain previous compilers. In particular, platforms which
  /// required strict binary compatibility with older versions of GCC
  /// may need to exempt themselves.
  bool honorsRevision0_98() const {
    return !getTarget().getTriple().isOSDarwin();
  }

  bool HasAVX;
  // Some ABIs (e.g. X32 ABI and Native Client OS) use 32 bit pointers on
  // 64-bit hardware.
  bool Has64BitPointers;

public:
  X86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool hasavx) :
      ABIInfo(CGT), HasAVX(hasavx),
      Has64BitPointers(CGT.getDataLayout().getPointerSize(0) == 8) {
  }

  bool isPassedUsingAVXType(QualType type) const {
    unsigned neededInt, neededSSE;
    // The freeIntRegs argument doesn't matter here.
    ABIArgInfo info = classifyArgumentType(type, 0, neededInt, neededSSE,
                                           /*isNamedArg*/true);
    if (info.isDirect()) {
      llvm::Type *ty = info.getCoerceToType();
      if (llvm::VectorType *vectorTy = dyn_cast_or_null<llvm::VectorType>(ty))
        return (vectorTy->getBitWidth() > 128);
    }
    return false;
  }

  void computeInfo(CGFunctionInfo &FI) const override;

  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};
/// WinX86_64ABIInfo - The Windows X86_64 ABI information.
class WinX86_64ABIInfo : public ABIInfo {

  ABIArgInfo classify(QualType Ty, bool IsReturnType) const;

public:
  WinX86_64ABIInfo(CodeGen::CodeGenTypes &CGT) : ABIInfo(CGT) {}

  void computeInfo(CGFunctionInfo &FI) const override;

  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};
class X86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  X86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
      : TargetCodeGenInfo(new X86_64ABIInfo(CGT, HasAVX)) {}

  const X86_64ABIInfo &getABIInfo() const {
    return static_cast<const X86_64ABIInfo&>(TargetCodeGenInfo::getABIInfo());
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override {
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }

  llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                  StringRef Constraint,
                                  llvm::Type* Ty) const override {
    return X86AdjustInlineAsmType(CGF, Constraint, Ty);
  }

  bool isNoProtoCallVariadic(const CallArgList &args,
                             const FunctionNoProtoType *fnType) const override {
    // The default CC on x86-64 sets %al to the number of SSE
    // registers used, and GCC sets this when calling an unprototyped
    // function, so we override the default behavior. However, don't do
    // that when AVX types are involved: the ABI explicitly states it is
    // undefined, and it doesn't work in practice because of how the ABI
    // defines varargs anyway.
    if (fnType->getCallConv() == CC_C) {
      bool HasAVXType = false;
      for (CallArgList::const_iterator
             it = args.begin(), ie = args.end(); it != ie; ++it) {
        if (getABIInfo().isPassedUsingAVXType(it->Ty)) {
          HasAVXType = true;
          break;
        }
      }

      if (!HasAVXType)
        return true;
    }

    return TargetCodeGenInfo::isNoProtoCallVariadic(args, fnType);
  }

  llvm::Constant *
  getUBSanFunctionSignature(CodeGen::CodeGenModule &CGM) const override {
    unsigned Sig = (0xeb << 0) |  // jmp rel8
                   (0x0a << 8) |  //           .+0x0c
                   ('F' << 16) |
                   ('T' << 24);
    return llvm::ConstantInt::get(CGM.Int32Ty, Sig);
  }
};
static std::string qualifyWindowsLibrary(llvm::StringRef Lib) {
  // If the argument does not end in .lib, automatically add the suffix. This
  // matches the behavior of MSVC.
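  // (For example, "msvcrt" becomes "msvcrt.lib", while "foo.Lib" already
  // matches case-insensitively and is left alone.)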
  std::string ArgStr = Lib;
  if (!Lib.endswith_lower(".lib"))
    ArgStr += ".lib";
  return ArgStr;
}

class WinX86_32TargetCodeGenInfo : public X86_32TargetCodeGenInfo {
public:
  WinX86_32TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT,
                             bool d, bool p, bool w, unsigned RegParms)
    : X86_32TargetCodeGenInfo(CGT, d, p, w, RegParms) {}

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:";
    Opt += qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name,
                               llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};
class WinX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  WinX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT)
    : TargetCodeGenInfo(new WinX86_64ABIInfo(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    return 7;
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override {
    llvm::Value *Eight8 = llvm::ConstantInt::get(CGF.Int8Ty, 8);

    // 0-15 are the 16 integer registers.
    // 16 is %rip.
    AssignToArrayRange(CGF.Builder, Address, Eight8, 0, 16);
    return false;
  }

  void getDependentLibraryOption(llvm::StringRef Lib,
                                 llvm::SmallString<24> &Opt) const override {
    Opt = "/DEFAULTLIB:";
    Opt += qualifyWindowsLibrary(Lib);
  }

  void getDetectMismatchOption(llvm::StringRef Name,
                               llvm::StringRef Value,
                               llvm::SmallString<32> &Opt) const override {
    Opt = "/FAILIFMISMATCH:\"" + Name.str() + "=" + Value.str() + "\"";
  }
};
void X86_64ABIInfo::postMerge(unsigned AggregateSize, Class &Lo,
                              Class &Hi) const {
  // AMD64-ABI 3.2.3p2: Rule 5. Then a post merger cleanup is done:
  //
  // (a) If one of the classes is Memory, the whole argument is passed in
  //     memory.
  //
  // (b) If X87UP is not preceded by X87, the whole argument is passed in
  //     memory.
  //
  // (c) If the size of the aggregate exceeds two eightbytes and the first
  //     eightbyte isn't SSE or any other eightbyte isn't SSEUP, the whole
  //     argument is passed in memory. NOTE: This is necessary to keep the
  //     ABI working for processors that don't support the __m256 type.
  //
  // (d) If SSEUP is not preceded by SSE or SSEUP, it is converted to SSE.
  //
  // Some of these are enforced by the merging logic. Others can arise
  // only with unions; for example:
  //   union { _Complex double; unsigned; }
  //
  // Note that clauses (b) and (c) were added in 0.98.
  //
  if (Hi == Memory)
    Lo = Memory;
  if (Hi == X87Up && Lo != X87 && honorsRevision0_98())
    Lo = Memory;
  if (AggregateSize > 128 && (Lo != SSE || Hi != SSEUp))
    Lo = Memory;
  if (Hi == SSEUp && Lo != SSE)
    Hi = SSE;
}
X86_64ABIInfo::Class X86_64ABIInfo::merge(Class Accum, Class Field) {
  // AMD64-ABI 3.2.3p2: Rule 4. Each field of an object is
  // classified recursively so that always two fields are
  // considered. The resulting class is calculated according to
  // the classes of the fields in the eightbyte:
  //
  // (a) If both classes are equal, this is the resulting class.
  //
  // (b) If one of the classes is NO_CLASS, the resulting class is
  //     the other class.
  //
  // (c) If one of the classes is MEMORY, the result is the MEMORY
  //     class.
  //
  // (d) If one of the classes is INTEGER, the result is the
  //     INTEGER.
  //
  // (e) If one of the classes is X87, X87UP, COMPLEX_X87 class,
  //     MEMORY is used as class.
  //
  // (f) Otherwise class SSE is used.
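  //
  // For example, in "struct { int a; float b; }" both fields share one
  // eightbyte; merging INTEGER (a) with SSE (b) under rule (d) yields
  // INTEGER, so the struct is passed in a general purpose register.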
  // Accum should never be memory (we should have returned) or
  // ComplexX87 (because this cannot be passed in a structure).
  assert((Accum != Memory && Accum != ComplexX87) &&
         "Invalid accumulated classification during merge.");
  if (Accum == Field || Field == NoClass)
    return Accum;
  if (Field == Memory)
    return Memory;
  if (Accum == NoClass)
    return Field;
  if (Accum == Integer || Field == Integer)
    return Integer;
  if (Field == X87 || Field == X87Up || Field == ComplexX87 ||
      Accum == X87 || Accum == X87Up)
    return Memory;
  return SSE;
}
void X86_64ABIInfo::classify(QualType Ty, uint64_t OffsetBase,
                             Class &Lo, Class &Hi, bool isNamedArg) const {
  // FIXME: This code can be simplified by introducing a simple value class for
  // Class pairs with appropriate constructor methods for the various
  // situations.

  // FIXME: Some of the split computations are wrong; unaligned vectors
  // shouldn't be passed in registers for example, so there is no chance they
  // can straddle an eightbyte. Verify & simplify.

  Lo = Hi = NoClass;

  Class &Current = OffsetBase < 64 ? Lo : Hi;
  Current = Memory;

  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    BuiltinType::Kind k = BT->getKind();

    if (k == BuiltinType::Void) {
      Current = NoClass;
    } else if (k == BuiltinType::Int128 || k == BuiltinType::UInt128) {
      Lo = Integer;
      Hi = Integer;
    } else if (k >= BuiltinType::Bool && k <= BuiltinType::LongLong) {
      Current = Integer;
    } else if ((k == BuiltinType::Float || k == BuiltinType::Double) ||
               (k == BuiltinType::LongDouble &&
                getTarget().getTriple().isOSNaCl())) {
      Current = SSE;
    } else if (k == BuiltinType::LongDouble) {
      Lo = X87;
      Hi = X87Up;
    }
    // FIXME: _Decimal32 and _Decimal64 are SSE.
    // FIXME: _float128 and _Decimal128 are (SSE, SSEUp).
    return;
  }
  if (const EnumType *ET = Ty->getAs<EnumType>()) {
    // Classify the underlying integer type.
    classify(ET->getDecl()->getIntegerType(), OffsetBase, Lo, Hi, isNamedArg);
    return;
  }

  if (Ty->hasPointerRepresentation()) {
    Current = Integer;
    return;
  }

  if (Ty->isMemberPointerType()) {
    if (Ty->isMemberFunctionPointerType() && Has64BitPointers)
      Lo = Hi = Integer;
    else
      Current = Integer;
    return;
  }
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    uint64_t Size = getContext().getTypeSize(VT);
    if (Size == 32) {
      // gcc passes all <4 x char>, <2 x short>, <1 x int>, <1 x
      // float> as integer.
      Current = Integer;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      uint64_t EB_Real = (OffsetBase) / 64;
      uint64_t EB_Imag = (OffsetBase + Size - 1) / 64;
      if (EB_Real != EB_Imag)
        Hi = Lo;
    } else if (Size == 64) {
      // gcc passes <1 x double> in memory. :(
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::Double))
        return;

      // gcc passes <1 x long long> as INTEGER.
      if (VT->getElementType()->isSpecificBuiltinType(BuiltinType::LongLong) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULongLong) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::Long) ||
          VT->getElementType()->isSpecificBuiltinType(BuiltinType::ULong))
        Current = Integer;
      else
        Current = SSE;

      // If this type crosses an eightbyte boundary, it should be
      // split.
      if (OffsetBase && OffsetBase != 64)
        Hi = Lo;
    } else if (Size == 128 || (HasAVX && isNamedArg && Size == 256)) {
      // Arguments of 256-bits are split into four eightbyte chunks. The
      // least significant one belongs to class SSE and all the others to class
      // SSEUP. The original Lo and Hi design considers that types can't be
      // greater than 128-bits, so a 64-bit split in Hi and Lo makes sense.
      // This design isn't correct for 256-bits, but since there're no cases
      // where the upper parts would need to be inspected, avoid adding
      // complexity and just consider Hi to match the 64-256 part.
      //
      // Note that per 3.5.7 of AMD64-ABI, 256-bit args are only passed in
      // registers if they are "named", i.e. not part of the "..." of a
      // variadic function.
      Lo = SSE;
      Hi = SSEUp;
    }
    return;
  }
  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    QualType ET = getContext().getCanonicalType(CT->getElementType());

    uint64_t Size = getContext().getTypeSize(Ty);
    if (ET->isIntegralOrEnumerationType()) {
      if (Size <= 64)
        Current = Integer;
      else if (Size <= 128)
        Lo = Hi = Integer;
    } else if (ET == getContext().FloatTy)
      Current = SSE;
    else if (ET == getContext().DoubleTy ||
             (ET == getContext().LongDoubleTy &&
              getTarget().getTriple().isOSNaCl()))
      Lo = Hi = SSE;
    else if (ET == getContext().LongDoubleTy)
      Current = ComplexX87;

    // If this complex type crosses an eightbyte boundary then it
    // should be split.
    uint64_t EB_Real = (OffsetBase) / 64;
    uint64_t EB_Imag = (OffsetBase + getContext().getTypeSize(ET)) / 64;
    if (Hi == NoClass && EB_Real != EB_Imag)
      Hi = Lo;

    return;
  }
  if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    // Arrays are treated like structures.

    uint64_t Size = getContext().getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than four eightbytes, ..., it has class MEMORY.
    if (Size > 256)
      return;

    // AMD64-ABI 3.2.3p2: Rule 1. If ..., or it contains unaligned
    // fields, it has class MEMORY.
    //
    // Only need to check alignment of array base.
    if (OffsetBase % getContext().getTypeAlign(AT->getElementType()))
      return;

    // Otherwise implement simplified merge. We could be smarter about
    // this, but it isn't worth it and would be harder to verify.
    Current = NoClass;
    uint64_t EltSize = getContext().getTypeSize(AT->getElementType());
    uint64_t ArraySize = AT->getSize().getZExtValue();

    // The only case a 256-bit wide vector could be used is when the array
    // contains a single 256-bit element. Since Lo and Hi logic isn't extended
    // to work for sizes wider than 128, early check and fallback to memory.
    if (Size > 128 && EltSize != 256)
      return;

    for (uint64_t i=0, Offset=OffsetBase; i<ArraySize; ++i, Offset += EltSize) {
      Class FieldLo, FieldHi;
      classify(AT->getElementType(), Offset, FieldLo, FieldHi, isNamedArg);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    postMerge(Size, Lo, Hi);
    assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp array classification.");
    return;
  }
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    uint64_t Size = getContext().getTypeSize(Ty);

    // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger
    // than four eightbytes, ..., it has class MEMORY.
    if (Size > 256)
      return;

    // AMD64-ABI 3.2.3p2: Rule 2. If a C++ object has either a non-trivial
    // copy constructor or a non-trivial destructor, it is passed by invisible
    // reference.
    if (getRecordArgABI(RT, getCXXABI()))
      return;

    const RecordDecl *RD = RT->getDecl();

    // Assume variable sized types are passed in memory.
    if (RD->hasFlexibleArrayMember())
      return;

    const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);

    // Reset Lo class, this will be recomputed.
    Current = NoClass;

    // If this is a C++ record, classify the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
      for (const auto &I : CXXRD->bases()) {
        assert(!I.isVirtual() && !I.getType()->isDependentType() &&
               "Unexpected base class!");
        const CXXRecordDecl *Base =
          cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());

        // Classify this field.
        //
        // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate exceeds a
        // single eightbyte, each is classified separately. Each eightbyte gets
        // initialized to class NO_CLASS.
        Class FieldLo, FieldHi;
        uint64_t Offset =
          OffsetBase + getContext().toBits(Layout.getBaseClassOffset(Base));
        classify(I.getType(), Offset, FieldLo, FieldHi, isNamedArg);
        Lo = merge(Lo, FieldLo);
        Hi = merge(Hi, FieldHi);
        if (Lo == Memory || Hi == Memory)
          break;
      }
    }
    // Classify the fields one at a time, merging the results.
    unsigned idx = 0;
    for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
           i != e; ++i, ++idx) {
      uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
      bool BitField = i->isBitField();

      // AMD64-ABI 3.2.3p2: Rule 1. If the size of an object is larger than
      // four eightbytes, or it contains unaligned fields, it has class MEMORY.
      //
      // The only case a 256-bit wide vector could be used is when the struct
      // contains a single 256-bit element. Since Lo and Hi logic isn't extended
      // to work for sizes wider than 128, early check and fallback to memory.
      //
      if (Size > 128 && getContext().getTypeSize(i->getType()) != 256) {
        Lo = Memory;
        return;
      }
      // Note, skip this test for bit-fields, see below.
      if (!BitField && Offset % getContext().getTypeAlign(i->getType())) {
        Lo = Memory;
        return;
      }

      // Classify this field.
      //
      // AMD64-ABI 3.2.3p2: Rule 3. If the size of the aggregate
      // exceeds a single eightbyte, each is classified
      // separately. Each eightbyte gets initialized to class
      // NO_CLASS.
      Class FieldLo, FieldHi;

      // Bit-fields require special handling, they do not force the
      // structure to be passed in memory even if unaligned, and
      // therefore they can straddle an eightbyte.
      if (BitField) {
        // Ignore padding bit-fields.
        if (i->isUnnamedBitfield())
          continue;

        uint64_t Offset = OffsetBase + Layout.getFieldOffset(idx);
        uint64_t Size = i->getBitWidthValue(getContext());

        uint64_t EB_Lo = Offset / 64;
        uint64_t EB_Hi = (Offset + Size - 1) / 64;

        if (EB_Lo) {
          assert(EB_Hi == EB_Lo && "Invalid classification, type > 16 bytes.");
          FieldLo = NoClass;
          FieldHi = Integer;
        } else {
          FieldLo = Integer;
          FieldHi = EB_Hi ? Integer : NoClass;
        }
      } else
        classify(i->getType(), Offset, FieldLo, FieldHi, isNamedArg);
      Lo = merge(Lo, FieldLo);
      Hi = merge(Hi, FieldHi);
      if (Lo == Memory || Hi == Memory)
        break;
    }

    postMerge(Size, Lo, Hi);
  }
}
ABIArgInfo X86_64ABIInfo::getIndirectReturnResult(QualType Ty) const {
  // If this is a scalar LLVM value then assume LLVM will pass it in the right
  // place naturally.
  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  return ABIArgInfo::getIndirect(0);
}
bool X86_64ABIInfo::IsIllegalVectorType(QualType Ty) const {
  if (const VectorType *VecTy = Ty->getAs<VectorType>()) {
    uint64_t Size = getContext().getTypeSize(VecTy);
    unsigned LargestVector = HasAVX ? 256 : 128;
    if (Size <= 64 || Size > LargestVector)
      return true;
  }

  return false;
}
1857 ABIArgInfo X86_64ABIInfo::getIndirectResult(QualType Ty,
1858 unsigned freeIntRegs) const {
1859 // If this is a scalar LLVM value then assume LLVM will pass it in the right
1862 // This assumption is optimistic, as there could be free registers available
1863 // when we need to pass this argument in memory, and LLVM could try to pass
1864 // the argument in the free register. This does not seem to happen currently,
1865 // but this code would be much safer if we could mark the argument with
1866 // 'onstack'. See PR12193.
1867 if (!isAggregateTypeForABI(Ty) && !IsIllegalVectorType(Ty)) {
1868 // Treat an enum type as its underlying type.
1869 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
1870 Ty = EnumTy->getDecl()->getIntegerType();
1872 return (Ty->isPromotableIntegerType() ?
1873 ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
1876 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
1877 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
1879 // Compute the byval alignment. We specify the alignment of the byval in all
1880 // cases so that the mid-level optimizer knows the alignment of the byval.
1881 unsigned Align = std::max(getContext().getTypeAlign(Ty) / 8, 8U);
1883 // Attempt to avoid passing indirect results using byval when possible. This
1884 // is important for good codegen.
1886 // We do this by coercing the value into a scalar type which the backend can
1887 // handle naturally (i.e., without using byval).
  // For simplicity, we currently only do this when we have exhausted all of the
  // free integer registers. Doing this when there are free integer registers
  // would require more care, as we would have to ensure that the coerced value
  // did not claim the unused register. That would require either reordering the
  // arguments to the function (so that any subsequent inreg values came first),
  // or only doing this optimization when there were no following arguments that
  // might be inreg.
1897 // We currently expect it to be rare (particularly in well written code) for
1898 // arguments to be passed on the stack when there are still free integer
1899 // registers available (this would typically imply large structs being passed
1900 // by value), so this seems like a fair tradeoff for now.
1902 // We can revisit this if the backend grows support for 'onstack' parameter
1903 // attributes. See PR12193.
1904 if (freeIntRegs == 0) {
1905 uint64_t Size = getContext().getTypeSize(Ty);
1907 // If this type fits in an eightbyte, coerce it into the matching integral
1908 // type, which will end up on the stack (with alignment 8).
    if (Align == 8 && Size <= 64)
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                          Size));
  }

  return ABIArgInfo::getIndirect(Align);
}
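// Example (illustrative): once all six integer registers are exhausted, a
//   struct P { void *p; };   // 8 bytes, 8-byte aligned
// argument is coerced to a plain i64 that the backend simply places on the
// stack, while a 24-byte struct in the same position still goes indirect
// with the byval alignment computed above.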
/// GetByteVectorType - The ABI specifies that a value should be passed in a
/// full vector XMM/YMM register. Pick an LLVM IR type that will be passed as a
/// vector register.
1920 llvm::Type *X86_64ABIInfo::GetByteVectorType(QualType Ty) const {
1921 llvm::Type *IRType = CGT.ConvertType(Ty);
1923 // Wrapper structs that just contain vectors are passed just like vectors,
1924 // strip them off if present.
1925 llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType);
1926 while (STy && STy->getNumElements() == 1) {
1927 IRType = STy->getElementType(0);
1928 STy = dyn_cast<llvm::StructType>(IRType);
1931 // If the preferred type is a 16-byte vector, prefer to pass it.
1932 if (llvm::VectorType *VT = dyn_cast<llvm::VectorType>(IRType)){
1933 llvm::Type *EltTy = VT->getElementType();
1934 unsigned BitWidth = VT->getBitWidth();
1935 if ((BitWidth >= 128 && BitWidth <= 256) &&
1936 (EltTy->isFloatTy() || EltTy->isDoubleTy() ||
1937 EltTy->isIntegerTy(8) || EltTy->isIntegerTy(16) ||
1938 EltTy->isIntegerTy(32) || EltTy->isIntegerTy(64) ||
         EltTy->isIntegerTy(128)))
      return IRType;
  }

  return llvm::VectorType::get(llvm::Type::getDoubleTy(getVMContext()), 2);
}
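// E.g. (illustrative): a wrapper such as
//   struct V { __m128 v; };
// is unwrapped above and passed as <4 x float>, exactly like a bare __m128;
// anything without a usable preferred vector type falls back to the
// <2 x double> returned on the last line.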
1946 /// BitsContainNoUserData - Return true if the specified [start,end) bit range
/// is known to either be off the end of the specified type or to be in
1948 /// alignment padding. The user type specified is known to be at most 128 bits
1949 /// in size, and have passed through X86_64ABIInfo::classify with a successful
1950 /// classification that put one of the two halves in the INTEGER class.
1952 /// It is conservatively correct to return false.
1953 static bool BitsContainNoUserData(QualType Ty, unsigned StartBit,
1954 unsigned EndBit, ASTContext &Context) {
1955 // If the bytes being queried are off the end of the type, there is no user
1956 // data hiding here. This handles analysis of builtins, vectors and other
1957 // types that don't contain interesting padding.
1958 unsigned TySize = (unsigned)Context.getTypeSize(Ty);
  if (TySize <= StartBit)
    return true;
1962 if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
1963 unsigned EltSize = (unsigned)Context.getTypeSize(AT->getElementType());
1964 unsigned NumElts = (unsigned)AT->getSize().getZExtValue();
1966 // Check each element to see if the element overlaps with the queried range.
    for (unsigned i = 0; i != NumElts; ++i) {
      // If the element is after the span we care about, then we're done.
      unsigned EltOffset = i*EltSize;
      if (EltOffset >= EndBit) break;

      unsigned EltStart = EltOffset < StartBit ? StartBit-EltOffset : 0;
      if (!BitsContainNoUserData(AT->getElementType(), EltStart,
                                 EndBit-EltOffset, Context))
        return false;
    }
    // If it overlaps no elements, then it is safe to process as padding.
    return true;
  }
1981 if (const RecordType *RT = Ty->getAs<RecordType>()) {
1982 const RecordDecl *RD = RT->getDecl();
1983 const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);
1985 // If this is a C++ record, check the bases first.
1986 if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
1987 for (const auto &I : CXXRD->bases()) {
1988 assert(!I.isVirtual() && !I.getType()->isDependentType() &&
1989 "Unexpected base class!");
1990 const CXXRecordDecl *Base =
1991 cast<CXXRecordDecl>(I.getType()->getAs<RecordType>()->getDecl());
1993 // If the base is after the span we care about, ignore it.
1994 unsigned BaseOffset = Context.toBits(Layout.getBaseClassOffset(Base));
1995 if (BaseOffset >= EndBit) continue;
        unsigned BaseStart = BaseOffset < StartBit ? StartBit-BaseOffset : 0;
        if (!BitsContainNoUserData(I.getType(), BaseStart,
                                   EndBit-BaseOffset, Context))
          return false;
      }
    }

    // Verify that no field has data that overlaps the region of interest. Yes,
    // this could be sped up a lot by being smarter about queried fields,
    // however we're only looking at structs up to 16 bytes, so we don't care
    // about excessive compile time.
    unsigned idx = 0;
2009 for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
2010 i != e; ++i, ++idx) {
2011 unsigned FieldOffset = (unsigned)Layout.getFieldOffset(idx);
2013 // If we found a field after the region we care about, then we're done.
2014 if (FieldOffset >= EndBit) break;
      unsigned FieldStart = FieldOffset < StartBit ? StartBit-FieldOffset : 0;
      if (!BitsContainNoUserData(i->getType(), FieldStart, EndBit-FieldOffset,
                                 Context))
        return false;
    }

    // If nothing in this record overlapped the area of interest, then we're
    // clean.
    return true;
  }

  return false;
}
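// For example (illustrative): struct { float a, b, c; } occupies 96 bits, so
// querying bits [96, 128) reports "no user data" (off the end of the type);
// this is precisely what lets GetSSETypeAtOffset() below shrink the second
// eightbyte of such a struct to a single float.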
2030 /// ContainsFloatAtOffset - Return true if the specified LLVM IR type has a
2031 /// float member at the specified offset. For example, {int,{float}} has a
/// float at offset 4. It is conservatively correct for this routine to return
/// false.
2034 static bool ContainsFloatAtOffset(llvm::Type *IRType, unsigned IROffset,
2035 const llvm::DataLayout &TD) {
  // Base case if we find a float.
  if (IROffset == 0 && IRType->isFloatTy())
    return true;
2040 // If this is a struct, recurse into the field at the specified offset.
2041 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
2042 const llvm::StructLayout *SL = TD.getStructLayout(STy);
2043 unsigned Elt = SL->getElementContainingOffset(IROffset);
2044 IROffset -= SL->getElementOffset(Elt);
2045 return ContainsFloatAtOffset(STy->getElementType(Elt), IROffset, TD);
2048 // If this is an array, recurse into the field at the specified offset.
2049 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
2050 llvm::Type *EltTy = ATy->getElementType();
2051 unsigned EltSize = TD.getTypeAllocSize(EltTy);
    IROffset -= IROffset/EltSize*EltSize;
    return ContainsFloatAtOffset(EltTy, IROffset, TD);
  }

  return false;
}
2060 /// GetSSETypeAtOffset - Return a type that will be passed by the backend in the
2061 /// low 8 bytes of an XMM register, corresponding to the SSE class.
2062 llvm::Type *X86_64ABIInfo::
2063 GetSSETypeAtOffset(llvm::Type *IRType, unsigned IROffset,
2064 QualType SourceTy, unsigned SourceOffset) const {
  // The only three choices we have are double, <2 x float>, or float. We
  // pass as float if the last 4 bytes are just padding. This happens for
  // structs that contain 3 floats.
2068 if (BitsContainNoUserData(SourceTy, SourceOffset*8+32,
2069 SourceOffset*8+64, getContext()))
2070 return llvm::Type::getFloatTy(getVMContext());
2072 // We want to pass as <2 x float> if the LLVM IR type contains a float at
  // offset+0 and offset+4. Walk the LLVM IR type to find out if this is the
  // case.
2075 if (ContainsFloatAtOffset(IRType, IROffset, getDataLayout()) &&
2076 ContainsFloatAtOffset(IRType, IROffset+4, getDataLayout()))
2077 return llvm::VectorType::get(llvm::Type::getFloatTy(getVMContext()), 2);
  return llvm::Type::getDoubleTy(getVMContext());
}
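// Worked example (illustrative): for struct { float x, y, z; } the first
// eightbyte has floats at byte offsets 0 and 4 and becomes <2 x float>; the
// second eightbyte holds only z followed by tail padding, so the
// BitsContainNoUserData() check selects a lone float for it.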
2083 /// GetINTEGERTypeAtOffset - The ABI specifies that a value should be passed in
2084 /// an 8-byte GPR. This means that we either have a scalar or we are talking
2085 /// about the high or low part of an up-to-16-byte struct. This routine picks
/// the best LLVM IR type to represent this, which may be i64 or may be anything
/// else that the backend will pass in a GPR that works better (e.g. i8, %foo*,
/// etc).
2090 /// PrefType is an LLVM IR type that corresponds to (part of) the IR type for
2091 /// the source type. IROffset is an offset in bytes into the LLVM IR type that
2092 /// the 8-byte value references. PrefType may be null.
2094 /// SourceTy is the source-level type for the entire argument. SourceOffset is
2095 /// an offset into this that we're processing (which is always either 0 or 8).
2097 llvm::Type *X86_64ABIInfo::
2098 GetINTEGERTypeAtOffset(llvm::Type *IRType, unsigned IROffset,
2099 QualType SourceTy, unsigned SourceOffset) const {
2100 // If we're dealing with an un-offset LLVM IR type, then it means that we're
2101 // returning an 8-byte unit starting with it. See if we can safely use it.
2102 if (IROffset == 0) {
    // Pointers and int64's always fill the 8-byte unit.
    if ((isa<llvm::PointerType>(IRType) && Has64BitPointers) ||
        IRType->isIntegerTy(64))
      return IRType;
2108 // If we have a 1/2/4-byte integer, we can use it only if the rest of the
2109 // goodness in the source type is just tail padding. This is allowed to
2110 // kick in for struct {double,int} on the int, but not on
2111 // struct{double,int,int} because we wouldn't return the second int. We
2112 // have to do this analysis on the source type because we can't depend on
2113 // unions being lowered a specific way etc.
2114 if (IRType->isIntegerTy(8) || IRType->isIntegerTy(16) ||
2115 IRType->isIntegerTy(32) ||
2116 (isa<llvm::PointerType>(IRType) && !Has64BitPointers)) {
2117 unsigned BitWidth = isa<llvm::PointerType>(IRType) ? 32 :
2118 cast<llvm::IntegerType>(IRType)->getBitWidth();
      if (BitsContainNoUserData(SourceTy, SourceOffset*8+BitWidth,
                                SourceOffset*8+64, getContext()))
        return IRType;
    }
  }
2126 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(IRType)) {
2127 // If this is a struct, recurse into the field at the specified offset.
2128 const llvm::StructLayout *SL = getDataLayout().getStructLayout(STy);
2129 if (IROffset < SL->getSizeInBytes()) {
2130 unsigned FieldIdx = SL->getElementContainingOffset(IROffset);
2131 IROffset -= SL->getElementOffset(FieldIdx);
      return GetINTEGERTypeAtOffset(STy->getElementType(FieldIdx), IROffset,
                                    SourceTy, SourceOffset);
    }
  }
2138 if (llvm::ArrayType *ATy = dyn_cast<llvm::ArrayType>(IRType)) {
2139 llvm::Type *EltTy = ATy->getElementType();
2140 unsigned EltSize = getDataLayout().getTypeAllocSize(EltTy);
2141 unsigned EltOffset = IROffset/EltSize*EltSize;
    return GetINTEGERTypeAtOffset(EltTy, IROffset-EltOffset, SourceTy,
                                  SourceOffset);
  }
2146 // Okay, we don't have any better idea of what to pass, so we pass this in an
2147 // integer register that isn't too big to fit the rest of the struct.
2148 unsigned TySizeInBytes =
2149 (unsigned)getContext().getTypeSizeInChars(SourceTy).getQuantity();
2151 assert(TySizeInBytes != SourceOffset && "Empty field?");
2153 // It is always safe to classify this as an integer type up to i64 that
2154 // isn't larger than the structure.
  return llvm::IntegerType::get(getVMContext(),
                                std::min(TySizeInBytes-SourceOffset, 8U)*8);
}
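// Worked example (illustrative): for struct { double d; int i; } the high
// eightbyte reduces to the lone i32, and since bits [32, 64) of that
// eightbyte are tail padding the function returns i32. For
// struct { double d; int i, j; } the same query must cover both ints, so the
// integer fallback above returns i64 instead.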
2160 /// GetX86_64ByValArgumentPair - Given a high and low type that can ideally
2161 /// be used as elements of a two register pair to pass or return, return a
2162 /// first class aggregate to represent them. For example, if the low part of
2163 /// a by-value argument should be passed as i32* and the high part as float,
2164 /// return {i32*, float}.
static llvm::Type *GetX86_64ByValArgumentPair(llvm::Type *Lo, llvm::Type *Hi,
                                              const llvm::DataLayout &TD) {
  // In order to correctly satisfy the ABI, we need the high part to start
2169 // at offset 8. If the high and low parts we inferred are both 4-byte types
2170 // (e.g. i32 and i32) then the resultant struct type ({i32,i32}) won't have
2171 // the second element at offset 8. Check for this:
2172 unsigned LoSize = (unsigned)TD.getTypeAllocSize(Lo);
2173 unsigned HiAlign = TD.getABITypeAlignment(Hi);
2174 unsigned HiStart = llvm::DataLayout::RoundUpAlignment(LoSize, HiAlign);
2175 assert(HiStart != 0 && HiStart <= 8 && "Invalid x86-64 argument pair!");
2177 // To handle this, we have to increase the size of the low part so that the
2178 // second element will start at an 8 byte offset. We can't increase the size
  // of the second element because it might make us access off the end of the
  // struct.
2182 // There are only two sorts of types the ABI generation code can produce for
2183 // the low part of a pair that aren't 8 bytes in size: float or i8/i16/i32.
2184 // Promote these to a larger type.
  if (Lo->isFloatTy())
    Lo = llvm::Type::getDoubleTy(Lo->getContext());
  else {
    assert(Lo->isIntegerTy() && "Invalid/unknown lo type");
    Lo = llvm::Type::getInt64Ty(Lo->getContext());
  }

  llvm::StructType *Result = llvm::StructType::get(Lo, Hi, NULL);

  // Verify that the second element is at an 8-byte offset.
  assert(TD.getStructLayout(Result)->getElementOffset(1) == 8 &&
         "Invalid x86-64 argument pair!");
  return Result;
}
2202 ABIArgInfo X86_64ABIInfo::
2203 classifyReturnType(QualType RetTy) const {
2204 // AMD64-ABI 3.2.3p4: Rule 1. Classify the return type with the
2205 // classification algorithm.
2206 X86_64ABIInfo::Class Lo, Hi;
2207 classify(RetTy, 0, Lo, Hi, /*isNamedArg*/ true);
2209 // Check some invariants.
2210 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
2211 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
  llvm::Type *ResType = nullptr;
  switch (Lo) {
  case NoClass:
    if (Hi == NoClass)
      return ABIArgInfo::getIgnore();
    // If the low part is just padding, it takes no register, leave ResType
    // null.
    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
           "Unknown missing lo part");
    break;

  case SSEUp:
  case X87Up:
    llvm_unreachable("Invalid classification for lo word.");

    // AMD64-ABI 3.2.3p4: Rule 2. Types of class memory are returned via
    // hidden argument.
  case Memory:
    return getIndirectReturnResult(RetTy);

    // AMD64-ABI 3.2.3p4: Rule 3. If the class is INTEGER, the next
    // available register of the sequence %rax, %rdx is used.
  case Integer:
    ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);

    // If we have a sign or zero extended integer, make sure to return Extend
    // so that the parameter gets the right LLVM IR attributes.
    if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
      // Treat an enum type as its underlying type.
      if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
        RetTy = EnumTy->getDecl()->getIntegerType();

      if (RetTy->isIntegralOrEnumerationType() &&
          RetTy->isPromotableIntegerType())
        return ABIArgInfo::getExtend();
    }
    break;

    // AMD64-ABI 3.2.3p4: Rule 4. If the class is SSE, the next
    // available SSE register of the sequence %xmm0, %xmm1 is used.
  case SSE:
    ResType = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 0, RetTy, 0);
    break;

    // AMD64-ABI 3.2.3p4: Rule 6. If the class is X87, the value is
    // returned on the X87 stack in %st0 as 80-bit x87 number.
  case X87:
    ResType = llvm::Type::getX86_FP80Ty(getVMContext());
    break;

    // AMD64-ABI 3.2.3p4: Rule 8. If the class is COMPLEX_X87, the real
    // part of the value is returned in %st0 and the imaginary part in
    // %st1.
  case ComplexX87:
    assert(Hi == ComplexX87 && "Unexpected ComplexX87 classification.");
    ResType = llvm::StructType::get(llvm::Type::getX86_FP80Ty(getVMContext()),
                                    llvm::Type::getX86_FP80Ty(getVMContext()),
                                    NULL);
    break;
  }
  llvm::Type *HighPart = nullptr;
  switch (Hi) {
    // Memory was handled previously and X87 should
    // never occur as a hi class.
  case Memory:
  case X87:
    llvm_unreachable("Invalid classification for hi word.");

  case ComplexX87: // Previously handled.
  case NoClass:
    break;

  case Integer:
    HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
    if (Lo == NoClass) // Return HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;

  case SSE:
    HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
    if (Lo == NoClass) // Return HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;
    // AMD64-ABI 3.2.3p4: Rule 5. If the class is SSEUP, the eightbyte
    // is passed in the upper half of the last used vector register.
    //
    // SSEUp should always be preceded by SSE, just widen.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification.");
    ResType = GetByteVectorType(RetTy);
    break;

    // AMD64-ABI 3.2.3p4: Rule 7. If the class is X87UP, the value is
    // returned together with the previous X87 value in %st0.
  case X87Up:
    // If X87Up is preceded by X87, we don't need to do
    // anything. However, in some cases with unions it may not be
    // preceded by X87. In such situations we follow gcc and pass the
    // extra bits in an SSE reg.
    if (Lo != X87) {
      HighPart = GetSSETypeAtOffset(CGT.ConvertType(RetTy), 8, RetTy, 8);
      if (Lo == NoClass) // Return HighPart at offset 8 in memory.
        return ABIArgInfo::getDirect(HighPart, 8);
    }
    break;
  }
  // If a high part was specified, merge it together with the low part. It is
  // known to pass in the high eightbyte of the result. We do this by forming a
  // first class struct aggregate with the high and low part: {low, high}.
  if (HighPart)
    ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());

  return ABIArgInfo::getDirect(ResType);
}
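// Worked example (illustrative): returning _Complex double classifies as
// SSE/SSE, so ResType is double, HighPart is a second double, and the merged
// {double, double} comes back in %xmm0/%xmm1; returning long double
// classifies as X87/X87Up and comes back on the x87 stack instead.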
ABIArgInfo X86_64ABIInfo::classifyArgumentType(
    QualType Ty, unsigned freeIntRegs, unsigned &neededInt, unsigned &neededSSE,
    bool isNamedArg) const {
  X86_64ABIInfo::Class Lo, Hi;
2337 classify(Ty, 0, Lo, Hi, isNamedArg);
2339 // Check some invariants.
2340 // FIXME: Enforce these by construction.
2341 assert((Hi != Memory || Lo == Memory) && "Invalid memory classification.");
2342 assert((Hi != SSEUp || Lo == SSE) && "Invalid SSEUp classification.");
  neededInt = 0;
  neededSSE = 0;
  llvm::Type *ResType = nullptr;
  switch (Lo) {
  case NoClass:
    if (Hi == NoClass)
      return ABIArgInfo::getIgnore();
    // If the low part is just padding, it takes no register, leave ResType
    // null.
    assert((Hi == SSE || Hi == Integer || Hi == X87Up) &&
           "Unknown missing lo part");
    break;

    // AMD64-ABI 3.2.3p3: Rule 1. If the class is MEMORY, pass the argument
    // on the stack.
  case Memory:

    // AMD64-ABI 3.2.3p3: Rule 5. If the class is X87, X87UP or
    // COMPLEX_X87, it is passed in memory.
  case X87:
  case ComplexX87:
    if (getRecordArgABI(Ty, getCXXABI()) == CGCXXABI::RAA_Indirect)
      ++neededInt;
    return getIndirectResult(Ty, freeIntRegs);

  case SSEUp:
  case X87Up:
    llvm_unreachable("Invalid classification for lo word.");

    // AMD64-ABI 3.2.3p3: Rule 2. If the class is INTEGER, the next
    // available register of the sequence %rdi, %rsi, %rdx, %rcx, %r8
    // and %r9 is used.
  case Integer:
    ++neededInt;

    // Pick an 8-byte type based on the preferred type.
    ResType = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 0, Ty, 0);

    // If we have a sign or zero extended integer, make sure to return Extend
    // so that the parameter gets the right LLVM IR attributes.
    if (Hi == NoClass && isa<llvm::IntegerType>(ResType)) {
      // Treat an enum type as its underlying type.
      if (const EnumType *EnumTy = Ty->getAs<EnumType>())
        Ty = EnumTy->getDecl()->getIntegerType();

      if (Ty->isIntegralOrEnumerationType() &&
          Ty->isPromotableIntegerType())
        return ABIArgInfo::getExtend();
    }
    break;

    // AMD64-ABI 3.2.3p3: Rule 3. If the class is SSE, the next
    // available SSE register is used, the registers are taken in the
    // order from %xmm0 to %xmm7.
  case SSE: {
    ++neededSSE;
    llvm::Type *IRType = CGT.ConvertType(Ty);
    ResType = GetSSETypeAtOffset(IRType, 0, Ty, 0);
    break;
  }
  }
  llvm::Type *HighPart = nullptr;
  switch (Hi) {
    // Memory was handled previously, ComplexX87 and X87 should
    // never occur as hi classes, and X87Up must be preceded by X87,
    // which is passed in memory.
  case Memory:
  case X87:
  case ComplexX87:
    llvm_unreachable("Invalid classification for hi word.");

  case NoClass: break;

  case Integer:
    ++neededInt;
    // Pick an 8-byte type based on the preferred type.
    HighPart = GetINTEGERTypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);

    if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);
    break;

    // X87Up generally doesn't occur here (long double is passed in
    // memory), except in situations involving unions.
  case X87Up:
  case SSE:
    HighPart = GetSSETypeAtOffset(CGT.ConvertType(Ty), 8, Ty, 8);

    if (Lo == NoClass) // Pass HighPart at offset 8 in memory.
      return ABIArgInfo::getDirect(HighPart, 8);

    ++neededSSE;
    break;

    // AMD64-ABI 3.2.3p3: Rule 4. If the class is SSEUP, the
    // eightbyte is passed in the upper half of the last used SSE
    // register. This only happens when 128-bit vectors are passed.
  case SSEUp:
    assert(Lo == SSE && "Unexpected SSEUp classification");
    ResType = GetByteVectorType(Ty);
    break;
  }

  // If a high part was specified, merge it together with the low part. It is
  // known to pass in the high eightbyte of the result. We do this by forming a
  // first class struct aggregate with the high and low part: {low, high}.
  if (HighPart)
    ResType = GetX86_64ByValArgumentPair(ResType, HighPart, getDataLayout());

  return ABIArgInfo::getDirect(ResType);
}
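// Worked example (illustrative): for
//   struct A { long l; double d; };
// classify() yields Lo = Integer and Hi = SSE, so neededInt = 1,
// neededSSE = 1, and the argument is coerced to {i64, double}: one GPR plus
// one XMM register, provided both are still free at the call site.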
2458 void X86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
2460 if (!getCXXABI().classifyReturnType(FI))
2461 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
2463 // Keep track of the number of assigned registers.
2464 unsigned freeIntRegs = 6, freeSSERegs = 8;
2466 // If the return value is indirect, then the hidden argument is consuming one
2467 // integer register.
  if (FI.getReturnInfo().isIndirect())
    --freeIntRegs;

  bool isVariadic = FI.isVariadic();
  unsigned numRequiredArgs = 0;
  if (isVariadic)
    numRequiredArgs = FI.getRequiredArgs().getNumRequiredArgs();

  // AMD64-ABI 3.2.3p3: Once arguments are classified, the registers
  // get assigned (in left-to-right order) for passing as follows...
  for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
       it != ie; ++it) {
    bool isNamedArg = true;
    if (isVariadic)
      isNamedArg = (it - FI.arg_begin()) <
                   static_cast<signed>(numRequiredArgs);
2485 unsigned neededInt, neededSSE;
2486 it->info = classifyArgumentType(it->type, freeIntRegs, neededInt,
2487 neededSSE, isNamedArg);
2489 // AMD64-ABI 3.2.3p3: If there are no registers available for any
2490 // eightbyte of an argument, the whole argument is passed on the
2491 // stack. If registers have already been assigned for some
2492 // eightbytes of such an argument, the assignments get reverted.
    if (freeIntRegs >= neededInt && freeSSERegs >= neededSSE) {
      freeIntRegs -= neededInt;
      freeSSERegs -= neededSSE;
    } else {
      it->info = getIndirectResult(it->type, freeIntRegs);
    }
  }
}
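// E.g. (illustrative): in f(long, long, long, long, long, long, struct A)
// with struct A = { long, long }, the six longs consume all six GPRs, so
// the struct fails the check above and is reclassified through
// getIndirectResult(), landing wholly on the stack instead of being split
// between registers and memory.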
static llvm::Value *EmitVAArgFromMemory(llvm::Value *VAListAddr,
                                        QualType Ty,
                                        CodeGenFunction &CGF) {
2505 llvm::Value *overflow_arg_area_p =
2506 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_p");
2507 llvm::Value *overflow_arg_area =
2508 CGF.Builder.CreateLoad(overflow_arg_area_p, "overflow_arg_area");
2510 // AMD64-ABI 3.5.7p5: Step 7. Align l->overflow_arg_area upwards to a 16
2511 // byte boundary if alignment needed by type exceeds 8 byte boundary.
2512 // It isn't stated explicitly in the standard, but in practice we use
2513 // alignment greater than 16 where necessary.
  uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;
  if (Align > 8) {
    // overflow_arg_area = (overflow_arg_area + align - 1) & -align;
    llvm::Value *Offset =
      llvm::ConstantInt::get(CGF.Int64Ty, Align - 1);
    overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset);
    llvm::Value *AsInt = CGF.Builder.CreatePtrToInt(overflow_arg_area,
                                                    CGF.Int64Ty);
    llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, -(uint64_t)Align);
    overflow_arg_area =
      CGF.Builder.CreateIntToPtr(CGF.Builder.CreateAnd(AsInt, Mask),
                                 overflow_arg_area->getType(),
                                 "overflow_arg_area.align");
  }
2529 // AMD64-ABI 3.5.7p5: Step 8. Fetch type from l->overflow_arg_area.
  llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *Res =
    CGF.Builder.CreateBitCast(overflow_arg_area,
                              llvm::PointerType::getUnqual(LTy));
2535 // AMD64-ABI 3.5.7p5: Step 9. Set l->overflow_arg_area to:
2536 // l->overflow_arg_area + sizeof(type).
2537 // AMD64-ABI 3.5.7p5: Step 10. Align l->overflow_arg_area upwards to
2538 // an 8 byte boundary.
2540 uint64_t SizeInBytes = (CGF.getContext().getTypeSize(Ty) + 7) / 8;
2541 llvm::Value *Offset =
2542 llvm::ConstantInt::get(CGF.Int32Ty, (SizeInBytes + 7) & ~7);
2543 overflow_arg_area = CGF.Builder.CreateGEP(overflow_arg_area, Offset,
2544 "overflow_arg_area.next");
2545 CGF.Builder.CreateStore(overflow_arg_area, overflow_arg_area_p);
  // AMD64-ABI 3.5.7p5: Step 11. Return the fetched type.
  return Res;
}
2551 llvm::Value *X86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2552 CodeGenFunction &CGF) const {
  // Assume that va_list type is correct; should be pointer to LLVM type:
  // struct {
  //   i32 gp_offset;
  //   i32 fp_offset;
  //   i8* overflow_arg_area;
  //   i8* reg_save_area;
  // };
2560 unsigned neededInt, neededSSE;
2562 Ty = CGF.getContext().getCanonicalType(Ty);
2563 ABIArgInfo AI = classifyArgumentType(Ty, 0, neededInt, neededSSE,
2564 /*isNamedArg*/false);
2566 // AMD64-ABI 3.5.7p5: Step 1. Determine whether type may be passed
2567 // in the registers. If not go to step 7.
2568 if (!neededInt && !neededSSE)
2569 return EmitVAArgFromMemory(VAListAddr, Ty, CGF);
2571 // AMD64-ABI 3.5.7p5: Step 2. Compute num_gp to hold the number of
2572 // general purpose registers needed to pass type and num_fp to hold
2573 // the number of floating point registers needed.
2575 // AMD64-ABI 3.5.7p5: Step 3. Verify whether arguments fit into
2576 // registers. In the case: l->gp_offset > 48 - num_gp * 8 or
2577 // l->fp_offset > 304 - num_fp * 16 go to step 7.
  // NOTE: 304 is a typo; there are (6 * 8 + 8 * 16) = 176 bytes of
  // register save space.
2582 llvm::Value *InRegs = nullptr;
2583 llvm::Value *gp_offset_p = nullptr, *gp_offset = nullptr;
  llvm::Value *fp_offset_p = nullptr, *fp_offset = nullptr;

  if (neededInt) {
    gp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "gp_offset_p");
    gp_offset = CGF.Builder.CreateLoad(gp_offset_p, "gp_offset");
    InRegs = llvm::ConstantInt::get(CGF.Int32Ty, 48 - neededInt * 8);
    InRegs = CGF.Builder.CreateICmpULE(gp_offset, InRegs, "fits_in_gp");
  }

  if (neededSSE) {
    fp_offset_p = CGF.Builder.CreateStructGEP(VAListAddr, 1, "fp_offset_p");
    fp_offset = CGF.Builder.CreateLoad(fp_offset_p, "fp_offset");
    llvm::Value *FitsInFP =
      llvm::ConstantInt::get(CGF.Int32Ty, 176 - neededSSE * 16);
    FitsInFP = CGF.Builder.CreateICmpULE(fp_offset, FitsInFP, "fits_in_fp");
    InRegs = InRegs ? CGF.Builder.CreateAnd(InRegs, FitsInFP) : FitsInFP;
  }
2601 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
2602 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
2603 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
2604 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
2606 // Emit code to load the value if it was passed in registers.
2608 CGF.EmitBlock(InRegBlock);
2610 // AMD64-ABI 3.5.7p5: Step 4. Fetch type from l->reg_save_area with
2611 // an offset of l->gp_offset and/or l->fp_offset. This may require
2612 // copying to a temporary location in case the parameter is passed
2613 // in different register classes or requires an alignment greater
2614 // than 8 for general purpose registers and 16 for XMM registers.
2616 // FIXME: This really results in shameful code when we end up needing to
2617 // collect arguments from different places; often what should result in a
2618 // simple assembling of a structure from scattered addresses has many more
2619 // loads than necessary. Can we clean this up?
2620 llvm::Type *LTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *RegAddr =
    CGF.Builder.CreateLoad(CGF.Builder.CreateStructGEP(VAListAddr, 3),
                           "reg_save_area");
2624 if (neededInt && neededSSE) {
2626 assert(AI.isDirect() && "Unexpected ABI info for mixed regs");
2627 llvm::StructType *ST = cast<llvm::StructType>(AI.getCoerceToType());
2628 llvm::Value *Tmp = CGF.CreateMemTemp(Ty);
2629 Tmp = CGF.Builder.CreateBitCast(Tmp, ST->getPointerTo());
2630 assert(ST->getNumElements() == 2 && "Unexpected ABI info for mixed regs");
2631 llvm::Type *TyLo = ST->getElementType(0);
2632 llvm::Type *TyHi = ST->getElementType(1);
2633 assert((TyLo->isFPOrFPVectorTy() ^ TyHi->isFPOrFPVectorTy()) &&
2634 "Unexpected ABI info for mixed regs");
2635 llvm::Type *PTyLo = llvm::PointerType::getUnqual(TyLo);
2636 llvm::Type *PTyHi = llvm::PointerType::getUnqual(TyHi);
2637 llvm::Value *GPAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
2638 llvm::Value *FPAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
2639 llvm::Value *RegLoAddr = TyLo->isFPOrFPVectorTy() ? FPAddr : GPAddr;
    llvm::Value *RegHiAddr = TyLo->isFPOrFPVectorTy() ? GPAddr : FPAddr;
    llvm::Value *V =
      CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegLoAddr, PTyLo));
2643 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
2644 V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegHiAddr, PTyHi));
2645 CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
2647 RegAddr = CGF.Builder.CreateBitCast(Tmp,
2648 llvm::PointerType::getUnqual(LTy));
2649 } else if (neededInt) {
2650 RegAddr = CGF.Builder.CreateGEP(RegAddr, gp_offset);
2651 RegAddr = CGF.Builder.CreateBitCast(RegAddr,
2652 llvm::PointerType::getUnqual(LTy));
2654 // Copy to a temporary if necessary to ensure the appropriate alignment.
2655 std::pair<CharUnits, CharUnits> SizeAlign =
2656 CGF.getContext().getTypeInfoInChars(Ty);
2657 uint64_t TySize = SizeAlign.first.getQuantity();
    unsigned TyAlign = SizeAlign.second.getQuantity();
    if (TyAlign > 8) {
      llvm::Value *Tmp = CGF.CreateMemTemp(Ty);
      CGF.Builder.CreateMemCpy(Tmp, RegAddr, TySize, 8, false);
      RegAddr = Tmp;
    }
  } else if (neededSSE == 1) {
    RegAddr = CGF.Builder.CreateGEP(RegAddr, fp_offset);
    RegAddr = CGF.Builder.CreateBitCast(RegAddr,
                                        llvm::PointerType::getUnqual(LTy));
  } else {
    assert(neededSSE == 2 && "Invalid number of needed registers!");
2670 // SSE registers are spaced 16 bytes apart in the register save
2671 // area, we need to collect the two eightbytes together.
2672 llvm::Value *RegAddrLo = CGF.Builder.CreateGEP(RegAddr, fp_offset);
2673 llvm::Value *RegAddrHi = CGF.Builder.CreateConstGEP1_32(RegAddrLo, 16);
2674 llvm::Type *DoubleTy = CGF.DoubleTy;
2675 llvm::Type *DblPtrTy =
2676 llvm::PointerType::getUnqual(DoubleTy);
2677 llvm::StructType *ST = llvm::StructType::get(DoubleTy, DoubleTy, NULL);
2678 llvm::Value *V, *Tmp = CGF.CreateMemTemp(Ty);
2679 Tmp = CGF.Builder.CreateBitCast(Tmp, ST->getPointerTo());
    V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrLo,
                                                         DblPtrTy));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 0));
    V = CGF.Builder.CreateLoad(CGF.Builder.CreateBitCast(RegAddrHi,
                                                         DblPtrTy));
    CGF.Builder.CreateStore(V, CGF.Builder.CreateStructGEP(Tmp, 1));
    RegAddr = CGF.Builder.CreateBitCast(Tmp,
                                        llvm::PointerType::getUnqual(LTy));
  }
2690 // AMD64-ABI 3.5.7p5: Step 5. Set:
2691 // l->gp_offset = l->gp_offset + num_gp * 8
2692 // l->fp_offset = l->fp_offset + num_fp * 16.
  if (neededInt) {
    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededInt * 8);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(gp_offset, Offset),
                            gp_offset_p);
  }
  if (neededSSE) {
    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, neededSSE * 16);
    CGF.Builder.CreateStore(CGF.Builder.CreateAdd(fp_offset, Offset),
                            fp_offset_p);
  }
2703 CGF.EmitBranch(ContBlock);
2705 // Emit code to load the value if it was passed in memory.
2707 CGF.EmitBlock(InMemBlock);
2708 llvm::Value *MemAddr = EmitVAArgFromMemory(VAListAddr, Ty, CGF);
2710 // Return the appropriate result.
2712 CGF.EmitBlock(ContBlock);
  llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(RegAddr->getType(), 2,
                                                 "vaarg.addr");
  ResAddr->addIncoming(RegAddr, InRegBlock);
  ResAddr->addIncoming(MemAddr, InMemBlock);
  return ResAddr;
}
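// E.g. (illustrative): va_arg(ap, double) classifies with neededSSE == 1,
// so the code above tests fp_offset <= 160, loads the value from
// reg_save_area + fp_offset on the in-register path, and bumps fp_offset by
// 16; the overflow_arg_area path is taken only once all eight XMM save
// slots have been consumed.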
2720 ABIArgInfo WinX86_64ABIInfo::classify(QualType Ty, bool IsReturnType) const {
2722 if (Ty->isVoidType())
2723 return ABIArgInfo::getIgnore();
2725 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2726 Ty = EnumTy->getDecl()->getIntegerType();
2728 uint64_t Size = getContext().getTypeSize(Ty);
  const RecordType *RT = Ty->getAs<RecordType>();
  if (RT) {
    if (!IsReturnType) {
      if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(RT, getCXXABI()))
        return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
    }
2737 if (RT->getDecl()->hasFlexibleArrayMember())
2738 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
2740 // FIXME: mingw-w64-gcc emits 128-bit struct as i128
    if (Size == 128 && getTarget().getTriple().isWindowsGNUEnvironment())
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                                          Size));
  }
  if (Ty->isMemberPointerType()) {
    // If the member pointer is represented by an LLVM int or ptr, pass it
    // directly.
    llvm::Type *LLTy = CGT.ConvertType(Ty);
    if (LLTy->isPointerTy() || LLTy->isIntegerTy())
      return ABIArgInfo::getDirect();
  }
2754 if (RT || Ty->isMemberPointerType()) {
2755 // MS x64 ABI requirement: "Any argument that doesn't fit in 8 bytes, or is
2756 // not 1, 2, 4, or 8 bytes, must be passed by reference."
2757 if (Size > 64 || !llvm::isPowerOf2_64(Size))
2758 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
2760 // Otherwise, coerce it to a small integer.
    return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
  }
2764 if (Ty->isPromotableIntegerType())
2765 return ABIArgInfo::getExtend();
  return ABIArgInfo::getDirect();
}
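// E.g. (illustrative): under the MS x64 rule quoted above, a 12-byte
//   struct S3 { int a, b, c; };
// is neither 1, 2, 4, nor 8 bytes and is therefore passed by reference,
// while an 8-byte struct { int a, b; } is coerced to a single i64.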
2770 void WinX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
2771 if (!getCXXABI().classifyReturnType(FI))
2772 FI.getReturnInfo() = classify(FI.getReturnType(), true);
2774 for (auto &I : FI.arguments())
    I.info = classify(I.type, false);
}
2778 llvm::Value *WinX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2779 CodeGenFunction &CGF) const {
2780 llvm::Type *BPP = CGF.Int8PtrPtrTy;
2782 CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 8);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}
class NaClX86_64ABIInfo : public ABIInfo {
 public:
  NaClX86_64ABIInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
      : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, HasAVX) {}
  void computeInfo(CGFunctionInfo &FI) const override;
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;

 private:
  PNaClABIInfo PInfo;  // Used for generating calls with pnaclcall callingconv.
  X86_64ABIInfo NInfo; // Used for everything else.
};
class NaClX86_64TargetCodeGenInfo : public TargetCodeGenInfo {
 public:
  NaClX86_64TargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, bool HasAVX)
      : TargetCodeGenInfo(new NaClX86_64ABIInfo(CGT, HasAVX)) {}
};
2822 void NaClX86_64ABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (FI.getASTCallingConvention() == CC_PnaclCall)
    PInfo.computeInfo(FI);
  else
    NInfo.computeInfo(FI);
}

llvm::Value *NaClX86_64ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                          CodeGenFunction &CGF) const {
  // Always use the native convention; calling pnacl-style varargs functions
  // is unsupported.
  return NInfo.EmitVAArg(VAListAddr, Ty, CGF);
}
class PPC32TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
public:
  PPC32TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
2844 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
2845 // This is recovered from gcc output.
2846 return 1; // r1 is the dedicated stack pointer
2849 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2850 llvm::Value *Address) const override;
bool
PPC32TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2857 llvm::Value *Address) const {
2858 // This is calculated from the LLVM and GCC tables and verified
2859 // against gcc output. AFAIK all ABIs use the same encoding.
2861 CodeGen::CGBuilderTy &Builder = CGF.Builder;
2863 llvm::IntegerType *i8 = CGF.Int8Ty;
2864 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
2865 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
2866 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
2868 // 0-31: r0-31, the 4-byte general-purpose registers
2869 AssignToArrayRange(Builder, Address, Four8, 0, 31);
2871 // 32-63: fp0-31, the 8-byte floating-point registers
2872 AssignToArrayRange(Builder, Address, Eight8, 32, 63);
  // 64-76 are various 4-byte special-purpose registers:
  // 64: mq
  // 65: lr
  // 66: ctr
  // 67: ap
  // 68-75 cr0-7
  // 76: xer
  AssignToArrayRange(Builder, Address, Four8, 64, 76);
2883 // 77-108: v0-31, the 16-byte vector registers
2884 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
  // 109: vrsave
  // 110: vscr
  // 111: spe_acc
  // 112: spefscr
  // 113: sfp
  AssignToArrayRange(Builder, Address, Four8, 109, 113);

  return false;
}
2899 /// PPC64_SVR4_ABIInfo - The 64-bit PowerPC ELF (SVR4) ABI information.
class PPC64_SVR4_ABIInfo : public DefaultABIInfo {
public:
  enum ABIKind {
    ELFv1 = 0,
    ELFv2
  };

private:
  static const unsigned GPRBits = 64;
  ABIKind Kind;

public:
  PPC64_SVR4_ABIInfo(CodeGen::CodeGenTypes &CGT, ABIKind Kind)
    : DefaultABIInfo(CGT), Kind(Kind) {}
2915 bool isPromotableTypeForABI(QualType Ty) const;
2916 bool isAlignedParamType(QualType Ty) const;
2917 bool isHomogeneousAggregate(QualType Ty, const Type *&Base,
2918 uint64_t &Members) const;
2920 ABIArgInfo classifyReturnType(QualType RetTy) const;
2921 ABIArgInfo classifyArgumentType(QualType Ty) const;
2923 // TODO: We can add more logic to computeInfo to improve performance.
2924 // Example: For aggregate arguments that fit in a register, we could
2925 // use getDirectInReg (as is done below for structs containing a single
2926 // floating-point value) to avoid pushing them to memory on function
2927 // entry. This would require changing the logic in PPCISelLowering
2928 // when lowering the parameters in the caller and args in the callee.
2929 void computeInfo(CGFunctionInfo &FI) const override {
2930 if (!getCXXABI().classifyReturnType(FI))
2931 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
2932 for (auto &I : FI.arguments()) {
2933 // We rely on the default argument classification for the most part.
2934 // One exception: An aggregate containing a single floating-point
2935 // or vector item must be passed in a register if one is available.
      const Type *T = isSingleElementStruct(I.type, getContext());
      if (T) {
        const BuiltinType *BT = T->getAs<BuiltinType>();
        if ((T->isVectorType() && getContext().getTypeSize(T) == 128) ||
            (BT && BT->isFloatingPoint())) {
          QualType QT(T, 0);
          I.info = ABIArgInfo::getDirectInReg(CGT.ConvertType(QT));
          continue;
        }
      }
      I.info = classifyArgumentType(I.type);
    }
  }
2950 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
2951 CodeGenFunction &CGF) const override;
2954 class PPC64_SVR4_TargetCodeGenInfo : public TargetCodeGenInfo {
2956 PPC64_SVR4_TargetCodeGenInfo(CodeGenTypes &CGT,
2957 PPC64_SVR4_ABIInfo::ABIKind Kind)
2958 : TargetCodeGenInfo(new PPC64_SVR4_ABIInfo(CGT, Kind)) {}
2960 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
2961 // This is recovered from gcc output.
2962 return 1; // r1 is the dedicated stack pointer
2965 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2966 llvm::Value *Address) const override;
2969 class PPC64TargetCodeGenInfo : public DefaultTargetCodeGenInfo {
2971 PPC64TargetCodeGenInfo(CodeGenTypes &CGT) : DefaultTargetCodeGenInfo(CGT) {}
2973 int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
2974 // This is recovered from gcc output.
2975 return 1; // r1 is the dedicated stack pointer
2978 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
2979 llvm::Value *Address) const override;
2984 // Return true if the ABI requires Ty to be passed sign- or zero-
// extended to 64 bits.
bool
PPC64_SVR4_ABIInfo::isPromotableTypeForABI(QualType Ty) const {
2988 // Treat an enum type as its underlying type.
2989 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
2990 Ty = EnumTy->getDecl()->getIntegerType();
2992 // Promotable integer types are required to be promoted by the ABI.
  if (Ty->isPromotableIntegerType())
    return true;
2996 // In addition to the usual promotable integer types, we also need to
2997 // extend all 32-bit types, since the ABI requires promotion to 64 bits.
2998 if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
2999 switch (BT->getKind()) {
    case BuiltinType::Int:
    case BuiltinType::UInt:
      return true;
    default:
      break;
    }

  return false;
}
3010 /// isAlignedParamType - Determine whether a type requires 16-byte
/// alignment in the parameter area.
bool
PPC64_SVR4_ABIInfo::isAlignedParamType(QualType Ty) const {
3014 // Complex types are passed just like their elements.
3015 if (const ComplexType *CTy = Ty->getAs<ComplexType>())
3016 Ty = CTy->getElementType();
3018 // Only vector types of size 16 bytes need alignment (larger types are
3019 // passed via reference, smaller types are not aligned).
3020 if (Ty->isVectorType())
3021 return getContext().getTypeSize(Ty) == 128;
3023 // For single-element float/vector structs, we consider the whole type
3024 // to have the same alignment requirements as its single element.
3025 const Type *AlignAsType = nullptr;
  const Type *EltType = isSingleElementStruct(Ty, getContext());
  if (EltType) {
    const BuiltinType *BT = EltType->getAs<BuiltinType>();
    if ((EltType->isVectorType() &&
         getContext().getTypeSize(EltType) == 128) ||
        (BT && BT->isFloatingPoint()))
      AlignAsType = EltType;
  }
3035 // Likewise for ELFv2 homogeneous aggregates.
3036 const Type *Base = nullptr;
3037 uint64_t Members = 0;
  if (!AlignAsType && Kind == ELFv2 &&
      isAggregateTypeForABI(Ty) && isHomogeneousAggregate(Ty, Base, Members))
    AlignAsType = Base;

  // With special case aggregates, only vector base types need alignment.
  if (AlignAsType)
    return AlignAsType->isVectorType();

  // Otherwise, we only need alignment for any aggregate type that
  // has an alignment requirement of >= 16 bytes.
  if (isAggregateTypeForABI(Ty) && getContext().getTypeAlign(Ty) >= 128)
    return true;

  return false;
}
3054 /// isHomogeneousAggregate - Return true if a type is an ELFv2 homogeneous
3055 /// aggregate. Base is set to the base element type, and Members is set
3056 /// to the number of base elements.
bool
PPC64_SVR4_ABIInfo::isHomogeneousAggregate(QualType Ty, const Type *&Base,
                                           uint64_t &Members) const {
3060 if (const ConstantArrayType *AT = getContext().getAsConstantArrayType(Ty)) {
    uint64_t NElements = AT->getSize().getZExtValue();
    if (NElements == 0)
      return false;
    if (!isHomogeneousAggregate(AT->getElementType(), Base, Members))
      return false;
    Members *= NElements;
3067 } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
3068 const RecordDecl *RD = RT->getDecl();
    if (RD->hasFlexibleArrayMember())
      return false;

    Members = 0;
    for (const auto *FD : RD->fields()) {
3074 // Ignore (non-zero arrays of) empty records.
3075 QualType FT = FD->getType();
3076 while (const ConstantArrayType *AT =
3077 getContext().getAsConstantArrayType(FT)) {
        if (AT->getSize().getZExtValue() == 0)
          return false;
        FT = AT->getElementType();
      }
      if (isEmptyRecord(getContext(), FT, true))
        continue;
3085 // For compatibility with GCC, ignore empty bitfields in C++ mode.
3086 if (getContext().getLangOpts().CPlusPlus &&
          FD->isBitField() && FD->getBitWidthValue(getContext()) == 0)
        continue;
3090 uint64_t FldMembers;
      if (!isHomogeneousAggregate(FD->getType(), Base, FldMembers))
        return false;
3094 Members = (RD->isUnion() ?
                 std::max(Members, FldMembers) : Members + FldMembers);
    }

    if (!Base)
      return false;

    // Ensure there is no padding.
    if (getContext().getTypeSize(Base) * Members !=
        getContext().getTypeSize(Ty))
      return false;
  } else {
    Members = 1;

    if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
      Members = 2;
      Ty = CT->getElementType();
    }
3112 // Homogeneous aggregates for ELFv2 must have base types of float,
3113 // double, long double, or 128-bit vectors.
3114 if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
      if (BT->getKind() != BuiltinType::Float &&
          BT->getKind() != BuiltinType::Double &&
          BT->getKind() != BuiltinType::LongDouble)
        return false;
    } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
      if (getContext().getTypeSize(VT) != 128)
        return false;
    } else {
      return false;
    }
3126 // The base type must be the same for all members. Types that
3127 // agree in both total size and mode (float vs. vector) are
3128 // treated as being equivalent here.
    const Type *TyPtr = Ty.getTypePtr();
    if (!Base)
      Base = TyPtr;

    if (Base->isVectorType() != TyPtr->isVectorType() ||
        getContext().getTypeSize(Base) != getContext().getTypeSize(TyPtr))
      return false;
  }
3138 // Vector types require one register, floating point types require one
3139 // or two registers depending on their size.
3140 uint32_t NumRegs = Base->isVectorType() ? 1 :
3141 (getContext().getTypeSize(Base) + 63) / 64;
3143 // Homogeneous Aggregates may occupy at most 8 registers.
  return (Members > 0 && Members * NumRegs <= 8);
}
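// E.g. (illustrative): struct { double x, y, z; } is a homogeneous aggregate
// with Base = double and Members = 3 (three registers, within the limit of
// 8), whereas struct { double d; float f; } is rejected because the member
// base types differ in size.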
ABIArgInfo
PPC64_SVR4_ABIInfo::classifyArgumentType(QualType Ty) const {
3149 if (Ty->isAnyComplexType())
3150 return ABIArgInfo::getDirect();
3152 // Non-Altivec vector types are passed in GPRs (smaller than 16 bytes)
3153 // or via reference (larger than 16 bytes).
3154 if (Ty->isVectorType()) {
    uint64_t Size = getContext().getTypeSize(Ty);
    if (Size > 128)
      return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
    else if (Size < 128) {
      llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
      return ABIArgInfo::getDirect(CoerceTy);
    }
  }
3164 if (isAggregateTypeForABI(Ty)) {
3165 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
3166 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
3168 uint64_t ABIAlign = isAlignedParamType(Ty)? 16 : 8;
3169 uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8;
3171 // ELFv2 homogeneous aggregates are passed as array types.
3172 const Type *Base = nullptr;
3173 uint64_t Members = 0;
3174 if (Kind == ELFv2 &&
3175 isHomogeneousAggregate(Ty, Base, Members)) {
3176 llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
3177 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
3178 return ABIArgInfo::getDirect(CoerceTy);
3181 // If an aggregate may end up fully in registers, we do not
3182 // use the ByVal method, but pass the aggregate as array.
3183 // This is usually beneficial since we avoid forcing the
3184 // back-end to store the argument to memory.
3185 uint64_t Bits = getContext().getTypeSize(Ty);
3186 if (Bits > 0 && Bits <= 8 * GPRBits) {
3187 llvm::Type *CoerceTy;
3189 // Types up to 8 bytes are passed as integer type (which will be
3190 // properly aligned in the argument save area doubleword).
3191 if (Bits <= GPRBits)
3192 CoerceTy = llvm::IntegerType::get(getVMContext(),
3193 llvm::RoundUpToAlignment(Bits, 8));
3194 // Larger types are passed as arrays, with the base type selected
3195 // according to the required alignment in the save area.
3197 uint64_t RegBits = ABIAlign * 8;
3198 uint64_t NumRegs = llvm::RoundUpToAlignment(Bits, RegBits) / RegBits;
3199 llvm::Type *RegTy = llvm::IntegerType::get(getVMContext(), RegBits);
3200 CoerceTy = llvm::ArrayType::get(RegTy, NumRegs);
      return ABIArgInfo::getDirect(CoerceTy);
    }
3206 // All other aggregates are passed ByVal.
3207 return ABIArgInfo::getIndirect(ABIAlign, /*ByVal=*/true,
                                   /*Realign=*/TyAlign > ABIAlign);
  }

  return (isPromotableTypeForABI(Ty) ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
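// Worked example (illustrative): on ELFv2, a 24-byte struct with 8-byte
// alignment that is not a homogeneous aggregate has Bits = 192 > GPRBits,
// so it is coerced to [3 x i64] and can travel entirely in GPRs; only
// aggregates larger than 8 doublewords take the ByVal path above.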
ABIArgInfo
PPC64_SVR4_ABIInfo::classifyReturnType(QualType RetTy) const {
3217 if (RetTy->isVoidType())
3218 return ABIArgInfo::getIgnore();
3220 if (RetTy->isAnyComplexType())
3221 return ABIArgInfo::getDirect();
3223 // Non-Altivec vector types are returned in GPRs (smaller than 16 bytes)
3224 // or via reference (larger than 16 bytes).
3225 if (RetTy->isVectorType()) {
    uint64_t Size = getContext().getTypeSize(RetTy);
    if (Size > 128)
      return ABIArgInfo::getIndirect(0);
    else if (Size < 128) {
      llvm::Type *CoerceTy = llvm::IntegerType::get(getVMContext(), Size);
      return ABIArgInfo::getDirect(CoerceTy);
    }
  }
3235 if (isAggregateTypeForABI(RetTy)) {
3236 // ELFv2 homogeneous aggregates are returned as array types.
3237 const Type *Base = nullptr;
3238 uint64_t Members = 0;
3239 if (Kind == ELFv2 &&
3240 isHomogeneousAggregate(RetTy, Base, Members)) {
3241 llvm::Type *BaseTy = CGT.ConvertType(QualType(Base, 0));
3242 llvm::Type *CoerceTy = llvm::ArrayType::get(BaseTy, Members);
3243 return ABIArgInfo::getDirect(CoerceTy);
3246 // ELFv2 small aggregates are returned in up to two registers.
3247 uint64_t Bits = getContext().getTypeSize(RetTy);
    if (Kind == ELFv2 && Bits <= 2 * GPRBits) {
      if (Bits == 0)
        return ABIArgInfo::getIgnore();
3252 llvm::Type *CoerceTy;
3253 if (Bits > GPRBits) {
3254 CoerceTy = llvm::IntegerType::get(getVMContext(), GPRBits);
        CoerceTy = llvm::StructType::get(CoerceTy, CoerceTy, NULL);
      } else
        CoerceTy = llvm::IntegerType::get(getVMContext(),
                                          llvm::RoundUpToAlignment(Bits, 8));
      return ABIArgInfo::getDirect(CoerceTy);
    }
3262 // All other aggregates are returned indirectly.
3263 return ABIArgInfo::getIndirect(0);
  return (isPromotableTypeForABI(RetTy) ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
3270 // Based on ARMABIInfo::EmitVAArg, adjusted for 64-bit machine.
llvm::Value *PPC64_SVR4_ABIInfo::EmitVAArg(llvm::Value *VAListAddr,
                                           QualType Ty,
                                           CodeGenFunction &CGF) const {
3274 llvm::Type *BP = CGF.Int8PtrTy;
3275 llvm::Type *BPP = CGF.Int8PtrPtrTy;
3277 CGBuilderTy &Builder = CGF.Builder;
3278 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
3279 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
3281 // Handle types that require 16-byte alignment in the parameter save area.
3282 if (isAlignedParamType(Ty)) {
3283 llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
3284 AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt64(15));
3285 AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt64(-16));
3286 Addr = Builder.CreateIntToPtr(AddrAsInt, BP, "ap.align");
3289 // Update the va_list pointer. The pointer should be bumped by the
3290 // size of the object. We can trust getTypeSize() except for a complex
3291 // type whose base type is smaller than a doubleword. For these, the
3292 // size of the object is 16 bytes; see below for further explanation.
  unsigned SizeInBytes = CGF.getContext().getTypeSize(Ty) / 8;
  QualType BaseTy;
  unsigned CplxBaseSize = 0;
3297 if (const ComplexType *CTy = Ty->getAs<ComplexType>()) {
3298 BaseTy = CTy->getElementType();
3299 CplxBaseSize = CGF.getContext().getTypeSize(BaseTy) / 8;
    if (CplxBaseSize < 8)
      SizeInBytes = 16;
  }
3304 unsigned Offset = llvm::RoundUpToAlignment(SizeInBytes, 8);
3305 llvm::Value *NextAddr =
3306 Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int64Ty, Offset),
3308 Builder.CreateStore(NextAddr, VAListAddrAsBPP);
3310 // If we have a complex type and the base type is smaller than 8 bytes,
3311 // the ABI calls for the real and imaginary parts to be right-adjusted
3312 // in separate doublewords. However, Clang expects us to produce a
3313 // pointer to a structure with the two parts packed tightly. So generate
3314 // loads of the real and imaginary parts relative to the va_list pointer,
3315 // and store them to a temporary structure.
3316 if (CplxBaseSize && CplxBaseSize < 8) {
3317 llvm::Value *RealAddr = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
3318 llvm::Value *ImagAddr = RealAddr;
3319 if (CGF.CGM.getDataLayout().isBigEndian()) {
3320 RealAddr = Builder.CreateAdd(RealAddr, Builder.getInt64(8 - CplxBaseSize));
      ImagAddr = Builder.CreateAdd(ImagAddr, Builder.getInt64(16 - CplxBaseSize));
    } else {
      ImagAddr = Builder.CreateAdd(ImagAddr, Builder.getInt64(8));
    }
3325 llvm::Type *PBaseTy = llvm::PointerType::getUnqual(CGF.ConvertType(BaseTy));
3326 RealAddr = Builder.CreateIntToPtr(RealAddr, PBaseTy);
3327 ImagAddr = Builder.CreateIntToPtr(ImagAddr, PBaseTy);
3328 llvm::Value *Real = Builder.CreateLoad(RealAddr, false, ".vareal");
3329 llvm::Value *Imag = Builder.CreateLoad(ImagAddr, false, ".vaimag");
    llvm::Value *Ptr = CGF.CreateTempAlloca(CGT.ConvertTypeForMem(Ty),
                                            "vacplx");
3332 llvm::Value *RealPtr = Builder.CreateStructGEP(Ptr, 0, ".real");
3333 llvm::Value *ImagPtr = Builder.CreateStructGEP(Ptr, 1, ".imag");
3334 Builder.CreateStore(Real, RealPtr, false);
    Builder.CreateStore(Imag, ImagPtr, false);
    return Ptr;
  }
3339 // If the argument is smaller than 8 bytes, it is right-adjusted in
  // its doubleword slot. Adjust the pointer to pick it up from the
  // correct offset.
3342 if (SizeInBytes < 8 && CGF.CGM.getDataLayout().isBigEndian()) {
3343 llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
3344 AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt64(8 - SizeInBytes));
3345 Addr = Builder.CreateIntToPtr(AddrAsInt, BP);
3348 llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  return Builder.CreateBitCast(Addr, PTy);
}
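// E.g. (illustrative): va_arg(ap, _Complex float) on big-endian PPC64 finds
// the real part right-adjusted at ap+4 and the imaginary part at ap+12; the
// block above repacks them tightly into a temporary {float, float} so Clang
// sees the layout it expects.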
static bool
PPC64_initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3354 llvm::Value *Address) {
3355 // This is calculated from the LLVM and GCC tables and verified
3356 // against gcc output. AFAIK all ABIs use the same encoding.
3358 CodeGen::CGBuilderTy &Builder = CGF.Builder;
3360 llvm::IntegerType *i8 = CGF.Int8Ty;
3361 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
3362 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
3363 llvm::Value *Sixteen8 = llvm::ConstantInt::get(i8, 16);
3365 // 0-31: r0-31, the 8-byte general-purpose registers
3366 AssignToArrayRange(Builder, Address, Eight8, 0, 31);
3368 // 32-63: fp0-31, the 8-byte floating-point registers
3369 AssignToArrayRange(Builder, Address, Eight8, 32, 63);
  // 64-76 are various 4-byte special-purpose registers:
  // 64: mq
  // 65: lr
  // 66: ctr
  // 67: ap
  // 68-75 cr0-7
  // 76: xer
  AssignToArrayRange(Builder, Address, Four8, 64, 76);
3380 // 77-108: v0-31, the 16-byte vector registers
3381 AssignToArrayRange(Builder, Address, Sixteen8, 77, 108);
  // 109: vrsave
  // 110: vscr
  // 111: spe_acc
  // 112: spefscr
  // 113: sfp
  AssignToArrayRange(Builder, Address, Four8, 109, 113);

  return false;
}
bool
PPC64_SVR4_TargetCodeGenInfo::initDwarfEHRegSizeTable(
3395 CodeGen::CodeGenFunction &CGF,
3396 llvm::Value *Address) const {
  return PPC64_initDwarfEHRegSizeTable(CGF, Address);
}

bool
3402 PPC64TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
3403 llvm::Value *Address) const {
  return PPC64_initDwarfEHRegSizeTable(CGF, Address);
}
3408 //===----------------------------------------------------------------------===//
3409 // AArch64 ABI Implementation
3410 //===----------------------------------------------------------------------===//
class AArch64ABIInfo : public ABIInfo {
public:
  enum ABIKind {
    AAPCS = 0,
    DarwinPCS
  };

private:
  ABIKind Kind;

public:
  AArch64ABIInfo(CodeGenTypes &CGT, ABIKind Kind) : ABIInfo(CGT), Kind(Kind) {}

private:
  ABIKind getABIKind() const { return Kind; }
  bool isDarwinPCS() const { return Kind == DarwinPCS; }
3431 ABIArgInfo classifyReturnType(QualType RetTy) const;
3432 ABIArgInfo classifyArgumentType(QualType RetTy, unsigned &AllocatedVFP,
3433 bool &IsHA, unsigned &AllocatedGPR,
3434 bool &IsSmallAggr, bool IsNamedArg) const;
3435 bool isIllegalVectorType(QualType Ty) const;
3437 virtual void computeInfo(CGFunctionInfo &FI) const {
3438 // To correctly handle Homogeneous Aggregate, we need to keep track of the
3439 // number of SIMD and Floating-point registers allocated so far.
3440 // If the argument is an HFA or an HVA and there are sufficient unallocated
3441 // SIMD and Floating-point registers, then the argument is allocated to SIMD
3442 // and Floating-point Registers (with one register per member of the HFA or
3443 // HVA). Otherwise, the NSRN is set to 8.
3444 unsigned AllocatedVFP = 0;
3446 // To correctly handle small aggregates, we need to keep track of the number
3447 // of GPRs allocated so far. If the small aggregate can't all fit into
3448 // registers, it will be on stack. We don't allow the aggregate to be
3449 // partially in registers.
3450 unsigned AllocatedGPR = 0;
3452 // Find the number of named arguments. Variadic arguments get special
3453 // treatment with the Darwin ABI.
    unsigned NumRequiredArgs = (FI.isVariadic() ?
                                FI.getRequiredArgs().getNumRequiredArgs() :
                                FI.arg_size());
3459 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (CGFunctionInfo::arg_iterator it = FI.arg_begin(), ie = FI.arg_end();
         it != ie; ++it) {
      unsigned PreAllocation = AllocatedVFP, PreGPR = AllocatedGPR;
3463 bool IsHA = false, IsSmallAggr = false;
3464 const unsigned NumVFPs = 8;
3465 const unsigned NumGPRs = 8;
3466 bool IsNamedArg = ((it - FI.arg_begin()) <
3467 static_cast<signed>(NumRequiredArgs));
3468 it->info = classifyArgumentType(it->type, AllocatedVFP, IsHA,
3469 AllocatedGPR, IsSmallAggr, IsNamedArg);
3471 // Under AAPCS the 64-bit stack slot alignment means we can't pass HAs
3472 // as sequences of floats since they'll get "holes" inserted as
3473 // padding by the back end.
3474 if (IsHA && AllocatedVFP > NumVFPs && !isDarwinPCS() &&
3475 getContext().getTypeAlign(it->type) < 64) {
3476 uint32_t NumStackSlots = getContext().getTypeSize(it->type);
3477 NumStackSlots = llvm::RoundUpToAlignment(NumStackSlots, 64) / 64;
3479 llvm::Type *CoerceTy = llvm::ArrayType::get(
3480 llvm::Type::getDoubleTy(getVMContext()), NumStackSlots);
3481 it->info = ABIArgInfo::getDirect(CoerceTy);
3484 // If we do not have enough VFP registers for the HA, any VFP registers
3485 // that are unallocated are marked as unavailable. To achieve this, we add
3486 // padding of (NumVFPs - PreAllocation) floats.
3487 if (IsHA && AllocatedVFP > NumVFPs && PreAllocation < NumVFPs) {
3488 llvm::Type *PaddingTy = llvm::ArrayType::get(
3489 llvm::Type::getFloatTy(getVMContext()), NumVFPs - PreAllocation);
3490 it->info.setPaddingType(PaddingTy);
3493 // If we do not have enough GPRs for the small aggregate, any GPR regs
3494 // that are unallocated are marked as unavailable.
3495 if (IsSmallAggr && AllocatedGPR > NumGPRs && PreGPR < NumGPRs) {
3496 llvm::Type *PaddingTy = llvm::ArrayType::get(
3497 llvm::Type::getInt32Ty(getVMContext()), NumGPRs - PreGPR);
3499 ABIArgInfo::getDirect(it->info.getCoerceToType(), 0, PaddingTy);
3504 llvm::Value *EmitDarwinVAArg(llvm::Value *VAListAddr, QualType Ty,
3505 CodeGenFunction &CGF) const;
3507 llvm::Value *EmitAAPCSVAArg(llvm::Value *VAListAddr, QualType Ty,
3508 CodeGenFunction &CGF) const;
3510 virtual llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
3511 CodeGenFunction &CGF) const {
3512 return isDarwinPCS() ? EmitDarwinVAArg(VAListAddr, Ty, CGF)
3513 : EmitAAPCSVAArg(VAListAddr, Ty, CGF);
class AArch64TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  AArch64TargetCodeGenInfo(CodeGenTypes &CGT, AArch64ABIInfo::ABIKind Kind)
      : TargetCodeGenInfo(new AArch64ABIInfo(CGT, Kind)) {}

  StringRef getARCRetainAutoreleasedReturnValueMarker() const {
    return "mov\tfp, fp\t\t; marker for objc_retainAutoreleaseReturnValue";
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const { return 31; }

  virtual bool doesReturnSlotInterfereWithArgs() const { return false; }
};
}
static bool isHomogeneousAggregate(QualType Ty, const Type *&Base,
                                   ASTContext &Context,
                                   uint64_t *HAMembers = nullptr);
ABIArgInfo AArch64ABIInfo::classifyArgumentType(QualType Ty,
                                                unsigned &AllocatedVFP,
                                                bool &IsHA,
                                                unsigned &AllocatedGPR,
                                                bool &IsSmallAggr,
                                                bool IsNamedArg) const {
  // Handle illegal vector types here.
  if (isIllegalVectorType(Ty)) {
    uint64_t Size = getContext().getTypeSize(Ty);
    if (Size <= 32) {
      llvm::Type *ResType = llvm::Type::getInt32Ty(getVMContext());
      AllocatedGPR++;
      return ABIArgInfo::getDirect(ResType);
    }
    if (Size == 64) {
      llvm::Type *ResType =
          llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 2);
      AllocatedVFP++;
      return ABIArgInfo::getDirect(ResType);
    }
    if (Size == 128) {
      llvm::Type *ResType =
          llvm::VectorType::get(llvm::Type::getInt32Ty(getVMContext()), 4);
      AllocatedVFP++;
      return ABIArgInfo::getDirect(ResType);
    }
    AllocatedGPR++;
    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
  }
  if (Ty->isVectorType())
    // Size of a legal vector should be either 64 or 128.
    AllocatedVFP++;
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
    if (BT->getKind() == BuiltinType::Half ||
        BT->getKind() == BuiltinType::Float ||
        BT->getKind() == BuiltinType::Double ||
        BT->getKind() == BuiltinType::LongDouble)
      AllocatedVFP++;
  }

  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>())
      Ty = EnumTy->getDecl()->getIntegerType();

    if (!Ty->isFloatingType() && !Ty->isVectorType()) {
      unsigned Alignment = getContext().getTypeAlign(Ty);
      if (!isDarwinPCS() && Alignment > 64)
        AllocatedGPR = llvm::RoundUpToAlignment(AllocatedGPR, Alignment / 64);

      int RegsNeeded = getContext().getTypeSize(Ty) > 64 ? 2 : 1;
      AllocatedGPR += RegsNeeded;
    }
    return (Ty->isPromotableIntegerType() && isDarwinPCS()
                ? ABIArgInfo::getExtend()
                : ABIArgInfo::getDirect());
  }

  // Structures with either a non-trivial destructor or a non-trivial
  // copy constructor are always indirect.
  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
    AllocatedGPR++;
    return ABIArgInfo::getIndirect(0, /*ByVal=*/RAA ==
                                          CGCXXABI::RAA_DirectInMemory);
  }

  // Empty records are always ignored on Darwin, but actually passed in C++
  // mode elsewhere for GNU compatibility.
  if (isEmptyRecord(getContext(), Ty, true)) {
    if (!getContext().getLangOpts().CPlusPlus || isDarwinPCS())
      return ABIArgInfo::getIgnore();

    return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
  }
  // Homogeneous Floating-point Aggregates (HFAs) need to be expanded.
  const Type *Base = nullptr;
  uint64_t Members = 0;
  if (isHomogeneousAggregate(Ty, Base, getContext(), &Members)) {
    IsHA = true;
    if (!IsNamedArg && isDarwinPCS()) {
      // With the Darwin ABI, variadic arguments are always passed on the stack
      // and should not be expanded. Treat variadic HFAs as arrays of doubles.
      uint64_t Size = getContext().getTypeSize(Ty);
      llvm::Type *BaseTy = llvm::Type::getDoubleTy(getVMContext());
      return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
    }
    AllocatedVFP += Members;
    return ABIArgInfo::getExpand();
  }
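
  // Illustrative example (not from the original source): under AAPCS64,
  //   struct S { float x, y, z; };
  // is an HFA with Base = float and Members = 3; a named argument of this
  // type consumes three SIMD/FP registers and is expanded, whereas adding a
  // non-floating member would disqualify it and send it through the
  // small-aggregate rules below.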
  // Aggregates <= 16 bytes are passed directly in registers or on the stack.
  uint64_t Size = getContext().getTypeSize(Ty);
  if (Size <= 128) {
    unsigned Alignment = getContext().getTypeAlign(Ty);
    if (!isDarwinPCS() && Alignment > 64)
      AllocatedGPR = llvm::RoundUpToAlignment(AllocatedGPR, Alignment / 64);

    Size = 64 * ((Size + 63) / 64); // round up to multiple of 8 bytes
    AllocatedGPR += Size / 64;
    IsSmallAggr = true;
    // We use a pair of i64 for 16-byte aggregate with 8-byte alignment.
    // For aggregates with 16-byte alignment, we use i128.
    if (Alignment < 128 && Size == 128) {
      llvm::Type *BaseTy = llvm::Type::getInt64Ty(getVMContext());
      return ABIArgInfo::getDirect(llvm::ArrayType::get(BaseTy, Size / 64));
    }
    return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
  }

  AllocatedGPR++;
  return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
}
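
// Worked example (illustrative, not in the original): struct { int a, b, c; }
// is 96 bits with 32-bit alignment, so Size rounds up to 128 and, since the
// alignment is below 128, the argument is coerced to [2 x i64]; the same
// struct with 16-byte alignment would be coerced to i128 instead.
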
ABIArgInfo AArch64ABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // Large vector types should be returned via memory.
  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128)
    return ABIArgInfo::getIndirect(0);

  if (!isAggregateTypeForABI(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    return (RetTy->isPromotableIntegerType() && isDarwinPCS()
                ? ABIArgInfo::getExtend()
                : ABIArgInfo::getDirect());
  }

  if (isEmptyRecord(getContext(), RetTy, true))
    return ABIArgInfo::getIgnore();

  const Type *Base = nullptr;
  if (isHomogeneousAggregate(RetTy, Base, getContext()))
    // Homogeneous Floating-point Aggregates (HFAs) are returned directly.
    return ABIArgInfo::getDirect();

  // Aggregates <= 16 bytes are returned directly in registers or on the stack.
  uint64_t Size = getContext().getTypeSize(RetTy);
  if (Size <= 128) {
    Size = 64 * ((Size + 63) / 64); // round up to multiple of 8 bytes
    return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(), Size));
  }

  return ABIArgInfo::getIndirect(0);
}
/// isIllegalVectorType - check whether the vector type is legal for AArch64.
bool AArch64ABIInfo::isIllegalVectorType(QualType Ty) const {
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // Check whether VT is legal.
    unsigned NumElements = VT->getNumElements();
    uint64_t Size = getContext().getTypeSize(VT);
    // NumElements should be a power of 2 between 1 and 16.
    if ((NumElements & (NumElements - 1)) != 0 || NumElements > 16)
      return true;
    return Size != 64 && (Size != 128 || NumElements == 1);
  }
  return false;
}
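
// For instance (illustrative): a vector of 3 chars is illegal because 3 is
// not a power of 2; a 4 x i32 vector (128 bits) is legal; and a 128-bit
// vector with a single element is rejected by the final check above.
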
static llvm::Value *EmitAArch64VAArg(llvm::Value *VAListAddr, QualType Ty,
                                     int AllocatedGPR, int AllocatedVFP,
                                     bool IsIndirect, CodeGenFunction &CGF) {
  // The AArch64 va_list type and handling is specified in the Procedure Call
  // Standard, section B.4:
  //
  // struct {
  //   void *__stack;
  //   void *__gr_top;
  //   void *__vr_top;
  //   int __gr_offs;
  //   int __vr_offs;
  // };

  llvm::BasicBlock *MaybeRegBlock = CGF.createBasicBlock("vaarg.maybe_reg");
  llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
  llvm::BasicBlock *OnStackBlock = CGF.createBasicBlock("vaarg.on_stack");
  llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
  auto &Ctx = CGF.getContext();

  llvm::Value *reg_offs_p = nullptr, *reg_offs = nullptr;
  int reg_top_index;
  int RegSize;
  if (AllocatedGPR) {
    assert(!AllocatedVFP && "Arguments never split between int & VFP regs");
    // 3 is the field number of __gr_offs
    reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 3, "gr_offs_p");
    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "gr_offs");
    reg_top_index = 1; // field number for __gr_top
    RegSize = 8 * AllocatedGPR;
  } else {
    assert(!AllocatedGPR && "Argument must go in VFP or int regs");
    // 4 is the field number of __vr_offs.
    reg_offs_p = CGF.Builder.CreateStructGEP(VAListAddr, 4, "vr_offs_p");
    reg_offs = CGF.Builder.CreateLoad(reg_offs_p, "vr_offs");
    reg_top_index = 2; // field number for __vr_top
    RegSize = 16 * AllocatedVFP;
  }
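
  // Illustrative note (not from the original source): after va_start in a
  // function with one named integer argument and no named FP arguments,
  // __gr_offs is -56 (seven 8-byte GPR save slots, x1-x7, remain) and
  // __vr_offs is -128 (eight 16-byte FP/SIMD save slots, q0-q7). Each
  // va_arg of a register-sized integer adds 8 to __gr_offs; once the
  // offset is non-negative, arguments are taken from __stack instead.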

  //=======================================
  // Find out where argument was passed
  //=======================================

  // If reg_offs >= 0 we're already using the stack for this type of
  // argument. We don't want to keep updating reg_offs (in case it overflows,
  // though anyone passing 2GB of arguments, each at most 16 bytes, deserves
  // whatever they get).
  llvm::Value *UsingStack = nullptr;
  UsingStack = CGF.Builder.CreateICmpSGE(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, 0));

  CGF.Builder.CreateCondBr(UsingStack, OnStackBlock, MaybeRegBlock);

  // Otherwise, at least some kind of argument could go in these registers, the
  // question is whether this particular type is too big.
  CGF.EmitBlock(MaybeRegBlock);

  // Integer arguments may need to correct register alignment (for example a
  // "struct { __int128 a; };" gets passed in x_2N, x_{2N+1}). In this case we
  // align __gr_offs to calculate the potential address.
  if (AllocatedGPR && !IsIndirect && Ctx.getTypeAlign(Ty) > 64) {
    int Align = Ctx.getTypeAlign(Ty) / 8;

    reg_offs = CGF.Builder.CreateAdd(
        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, Align - 1),
        "align_regoffs");
    reg_offs = CGF.Builder.CreateAnd(
        reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, -Align),
        "aligned_regoffs");
  }

  // Update the gr_offs/vr_offs pointer for the next call to va_arg on this
  // va_list.
  llvm::Value *NewOffset = nullptr;
  NewOffset = CGF.Builder.CreateAdd(
      reg_offs, llvm::ConstantInt::get(CGF.Int32Ty, RegSize), "new_reg_offs");
  CGF.Builder.CreateStore(NewOffset, reg_offs_p);

  // Now we're in a position to decide whether this argument really was in
  // registers or not.
  llvm::Value *InRegs = nullptr;
  InRegs = CGF.Builder.CreateICmpSLE(
      NewOffset, llvm::ConstantInt::get(CGF.Int32Ty, 0), "inreg");

  CGF.Builder.CreateCondBr(InRegs, InRegBlock, OnStackBlock);

  //=======================================
  // Argument was in registers
  //=======================================

  // Now we emit the code for if the argument was originally passed in
  // registers. First start the appropriate block:
  CGF.EmitBlock(InRegBlock);

  llvm::Value *reg_top_p = nullptr, *reg_top = nullptr;
  reg_top_p =
      CGF.Builder.CreateStructGEP(VAListAddr, reg_top_index, "reg_top_p");
  reg_top = CGF.Builder.CreateLoad(reg_top_p, "reg_top");
  llvm::Value *BaseAddr = CGF.Builder.CreateGEP(reg_top, reg_offs);
  llvm::Value *RegAddr = nullptr;
  llvm::Type *MemTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));

  if (IsIndirect) {
    // If it's been passed indirectly (actually a struct), whatever we find
    // from stored registers or on the stack will actually be a struct **.
    MemTy = llvm::PointerType::getUnqual(MemTy);
  }

  const Type *Base = nullptr;
  uint64_t NumMembers;
  bool IsHFA = isHomogeneousAggregate(Ty, Base, Ctx, &NumMembers);
  if (IsHFA && NumMembers > 1) {
    // Homogeneous aggregates passed in registers will have their elements
    // split and stored 16 bytes apart regardless of size (they're notionally
    // in qN, qN+1, ...). We reload and store into a temporary local variable
    // contiguously.
    assert(!IsIndirect && "Homogeneous aggregates should be passed directly");
    llvm::Type *BaseTy = CGF.ConvertType(QualType(Base, 0));
    llvm::Type *HFATy = llvm::ArrayType::get(BaseTy, NumMembers);
    llvm::Value *Tmp = CGF.CreateTempAlloca(HFATy);
    int Offset = 0;

    if (CGF.CGM.getDataLayout().isBigEndian() && Ctx.getTypeSize(Base) < 128)
      Offset = 16 - Ctx.getTypeSize(Base) / 8;
    for (unsigned i = 0; i < NumMembers; ++i) {
      llvm::Value *BaseOffset =
          llvm::ConstantInt::get(CGF.Int32Ty, 16 * i + Offset);
      llvm::Value *LoadAddr = CGF.Builder.CreateGEP(BaseAddr, BaseOffset);
      LoadAddr = CGF.Builder.CreateBitCast(
          LoadAddr, llvm::PointerType::getUnqual(BaseTy));
      llvm::Value *StoreAddr = CGF.Builder.CreateStructGEP(Tmp, i);

      llvm::Value *Elem = CGF.Builder.CreateLoad(LoadAddr);
      CGF.Builder.CreateStore(Elem, StoreAddr);
    }

    RegAddr = CGF.Builder.CreateBitCast(Tmp, MemTy);
  } else {
    // Otherwise the object is contiguous in memory.
    unsigned BeAlign = reg_top_index == 2 ? 16 : 8;
    if (CGF.CGM.getDataLayout().isBigEndian() &&
        (IsHFA || !isAggregateTypeForABI(Ty)) &&
        Ctx.getTypeSize(Ty) < (BeAlign * 8)) {
      int Offset = BeAlign - Ctx.getTypeSize(Ty) / 8;
      BaseAddr = CGF.Builder.CreatePtrToInt(BaseAddr, CGF.Int64Ty);

      BaseAddr = CGF.Builder.CreateAdd(
          BaseAddr, llvm::ConstantInt::get(CGF.Int64Ty, Offset), "align_be");

      BaseAddr = CGF.Builder.CreateIntToPtr(BaseAddr, CGF.Int8PtrTy);
    }

    RegAddr = CGF.Builder.CreateBitCast(BaseAddr, MemTy);
  }

  CGF.EmitBranch(ContBlock);

  //=======================================
  // Argument was on the stack
  //=======================================
  CGF.EmitBlock(OnStackBlock);

  llvm::Value *stack_p = nullptr, *OnStackAddr = nullptr;
  stack_p = CGF.Builder.CreateStructGEP(VAListAddr, 0, "stack_p");
  OnStackAddr = CGF.Builder.CreateLoad(stack_p, "stack");

  // Again, stack arguments may need realignment. In this case both integer
  // and floating-point ones might be affected.
  if (!IsIndirect && Ctx.getTypeAlign(Ty) > 64) {
    int Align = Ctx.getTypeAlign(Ty) / 8;

    OnStackAddr = CGF.Builder.CreatePtrToInt(OnStackAddr, CGF.Int64Ty);

    OnStackAddr = CGF.Builder.CreateAdd(
        OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, Align - 1),
        "align_stack");
    OnStackAddr = CGF.Builder.CreateAnd(
        OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, -Align),
        "align_stack");

    OnStackAddr = CGF.Builder.CreateIntToPtr(OnStackAddr, CGF.Int8PtrTy);
  }

  uint64_t StackSize;
  if (IsIndirect)
    StackSize = 8;
  else
    StackSize = Ctx.getTypeSize(Ty) / 8;

  // All stack slots are 8 bytes.
  StackSize = llvm::RoundUpToAlignment(StackSize, 8);

  llvm::Value *StackSizeC = llvm::ConstantInt::get(CGF.Int32Ty, StackSize);
  llvm::Value *NewStack =
      CGF.Builder.CreateGEP(OnStackAddr, StackSizeC, "new_stack");

  // Write the new value of __stack for the next call to va_arg.
  CGF.Builder.CreateStore(NewStack, stack_p);

  if (CGF.CGM.getDataLayout().isBigEndian() && !isAggregateTypeForABI(Ty) &&
      Ctx.getTypeSize(Ty) < 64) {
    int Offset = 8 - Ctx.getTypeSize(Ty) / 8;
    OnStackAddr = CGF.Builder.CreatePtrToInt(OnStackAddr, CGF.Int64Ty);

    OnStackAddr = CGF.Builder.CreateAdd(
        OnStackAddr, llvm::ConstantInt::get(CGF.Int64Ty, Offset), "align_be");

    OnStackAddr = CGF.Builder.CreateIntToPtr(OnStackAddr, CGF.Int8PtrTy);
  }

  OnStackAddr = CGF.Builder.CreateBitCast(OnStackAddr, MemTy);

  CGF.EmitBranch(ContBlock);

  //=======================================
  // Tie everything together
  //=======================================
  CGF.EmitBlock(ContBlock);

  llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(MemTy, 2, "vaarg.addr");
  ResAddr->addIncoming(RegAddr, InRegBlock);
  ResAddr->addIncoming(OnStackAddr, OnStackBlock);

  if (IsIndirect)
    return CGF.Builder.CreateLoad(ResAddr, "vaarg.addr");

  return ResAddr;
}
llvm::Value *AArch64ABIInfo::EmitAAPCSVAArg(llvm::Value *VAListAddr,
                                            QualType Ty,
                                            CodeGenFunction &CGF) const {

  unsigned AllocatedGPR = 0, AllocatedVFP = 0;
  bool IsHA = false, IsSmallAggr = false;
  ABIArgInfo AI = classifyArgumentType(Ty, AllocatedVFP, IsHA, AllocatedGPR,
                                       IsSmallAggr, false /*IsNamedArg*/);

  return EmitAArch64VAArg(VAListAddr, Ty, AllocatedGPR, AllocatedVFP,
                          AI.isIndirect(), CGF);
}
llvm::Value *AArch64ABIInfo::EmitDarwinVAArg(llvm::Value *VAListAddr,
                                             QualType Ty,
                                             CodeGenFunction &CGF) const {
  // We do not support va_arg for aggregates or illegal vector types.
  // Lower VAArg here for these cases and use the LLVM va_arg instruction for
  // other cases.
  if (!isAggregateTypeForABI(Ty) && !isIllegalVectorType(Ty))
    return nullptr;

  uint64_t Size = CGF.getContext().getTypeSize(Ty) / 8;
  uint64_t Align = CGF.getContext().getTypeAlign(Ty) / 8;

  const Type *Base = nullptr;
  bool isHA = isHomogeneousAggregate(Ty, Base, getContext());

  bool isIndirect = false;
  // Arguments bigger than 16 bytes which aren't homogeneous aggregates should
  // be passed indirectly.
  if (Size > 16 && !isHA) {
    isIndirect = true;
    Size = 8;
    Align = 8;
  }

  llvm::Type *BP = llvm::Type::getInt8PtrTy(CGF.getLLVMContext());
  llvm::Type *BPP = llvm::PointerType::getUnqual(BP);

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");

  if (isEmptyRecord(getContext(), Ty, true)) {
    // These are ignored for parameter passing purposes.
    llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
    return Builder.CreateBitCast(Addr, PTy);
  }

  const uint64_t MinABIAlign = 8;
  if (Align > MinABIAlign) {
    llvm::Value *Offset = llvm::ConstantInt::get(CGF.Int32Ty, Align - 1);
    Addr = Builder.CreateGEP(Addr, Offset);
    llvm::Value *AsInt = Builder.CreatePtrToInt(Addr, CGF.Int64Ty);
    llvm::Value *Mask = llvm::ConstantInt::get(CGF.Int64Ty, ~(Align - 1));
    llvm::Value *Aligned = Builder.CreateAnd(AsInt, Mask);
    Addr = Builder.CreateIntToPtr(Aligned, BP, "ap.align");
  }

  uint64_t Offset = llvm::RoundUpToAlignment(Size, MinABIAlign);
  llvm::Value *NextAddr = Builder.CreateGEP(
      Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset), "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  if (isIndirect)
    Addr = Builder.CreateLoad(Builder.CreateBitCast(Addr, BPP));
  llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  return AddrTyped;
}
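
// Example (illustrative, not in the original): on Darwin, va_arg of a
// 24-byte struct that is not a homogeneous aggregate takes the indirect
// path above: 8 bytes (one pointer) are consumed from the argument area
// and the value is then loaded through that pointer.
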
//===----------------------------------------------------------------------===//
// ARM ABI Implementation
//===----------------------------------------------------------------------===//

namespace {
class ARMABIInfo : public ABIInfo {
public:
  enum ABIKind {
    APCS = 0,
    AAPCS = 1,
    AAPCS_VFP
  };

private:
  ABIKind Kind;
  mutable int VFPRegs[16];
  const unsigned NumVFPs;
  const unsigned NumGPRs;
  mutable unsigned AllocatedGPRs;
  mutable unsigned AllocatedVFPs;

public:
  ARMABIInfo(CodeGenTypes &CGT, ABIKind _Kind) : ABIInfo(CGT), Kind(_Kind),
    NumVFPs(16), NumGPRs(4) {
    setRuntimeCC();
    resetAllocatedRegs();
  }

  bool isEABI() const {
    switch (getTarget().getTriple().getEnvironment()) {
    case llvm::Triple::Android:
    case llvm::Triple::EABI:
    case llvm::Triple::EABIHF:
    case llvm::Triple::GNUEABI:
    case llvm::Triple::GNUEABIHF:
      return true;
    default:
      return false;
    }
  }

  bool isEABIHF() const {
    switch (getTarget().getTriple().getEnvironment()) {
    case llvm::Triple::EABIHF:
    case llvm::Triple::GNUEABIHF:
      return true;
    default:
      return false;
    }
  }

  ABIKind getABIKind() const { return Kind; }

private:
  ABIArgInfo classifyReturnType(QualType RetTy, bool isVariadic) const;
  ABIArgInfo classifyArgumentType(QualType RetTy, bool isVariadic,
                                  bool &IsCPRC) const;
  bool isIllegalVectorType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override;

  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;

  llvm::CallingConv::ID getLLVMDefaultCC() const;
  llvm::CallingConv::ID getABIDefaultCC() const;
  void setRuntimeCC();

  void markAllocatedGPRs(unsigned Alignment, unsigned NumRequired) const;
  void markAllocatedVFPs(unsigned Alignment, unsigned NumRequired) const;
  void resetAllocatedRegs(void) const;
};
class ARMTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  ARMTargetCodeGenInfo(CodeGenTypes &CGT, ARMABIInfo::ABIKind K)
    : TargetCodeGenInfo(new ARMABIInfo(CGT, K)) {}

  const ARMABIInfo &getABIInfo() const {
    return static_cast<const ARMABIInfo&>(TargetCodeGenInfo::getABIInfo());
  }

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 13;
  }

  StringRef getARCRetainAutoreleasedReturnValueMarker() const override {
    return "mov\tr7, r7\t\t@ marker for objc_retainAutoreleaseReturnValue";
  }

  bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override {
    llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);

    // 0-15 are the 16 integer registers.
    AssignToArrayRange(CGF.Builder, Address, Four8, 0, 15);
    return true;
  }

  unsigned getSizeOfUnwindException() const override {
    if (getABIInfo().isEABI()) return 88;
    return TargetCodeGenInfo::getSizeOfUnwindException();
  }

  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &CGM) const override {
    const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
    if (!FD)
      return;

    const ARMInterruptAttr *Attr = FD->getAttr<ARMInterruptAttr>();
    if (!Attr)
      return;

    const char *Kind;
    switch (Attr->getInterrupt()) {
    case ARMInterruptAttr::Generic: Kind = ""; break;
    case ARMInterruptAttr::IRQ: Kind = "IRQ"; break;
    case ARMInterruptAttr::FIQ: Kind = "FIQ"; break;
    case ARMInterruptAttr::SWI: Kind = "SWI"; break;
    case ARMInterruptAttr::ABORT: Kind = "ABORT"; break;
    case ARMInterruptAttr::UNDEF: Kind = "UNDEF"; break;
    }

    llvm::Function *Fn = cast<llvm::Function>(GV);

    Fn->addFnAttr("interrupt", Kind);

    if (cast<ARMABIInfo>(getABIInfo()).getABIKind() == ARMABIInfo::APCS)
      return;

    // AAPCS guarantees that sp will be 8-byte aligned on any public interface,
    // however this is not necessarily true on taking any interrupt. Instruct
    // the backend to perform a realignment as part of the function prologue.
    llvm::AttrBuilder B;
    B.addStackAlignmentAttr(8);
    Fn->addAttributes(llvm::AttributeSet::FunctionIndex,
                      llvm::AttributeSet::get(CGM.getLLVMContext(),
                                              llvm::AttributeSet::FunctionIndex,
                                              B));
  }
};
}
void ARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
  // To correctly handle Homogeneous Aggregates, we need to keep track of the
  // VFP registers allocated so far.
  // C.1.vfp If the argument is a VFP CPRC and there are sufficient consecutive
  // VFP registers of the appropriate type unallocated then the argument is
  // allocated to the lowest-numbered sequence of such registers.
  // C.2.vfp If the argument is a VFP CPRC then any VFP registers that are
  // unallocated are marked as unavailable.
  resetAllocatedRegs();

  if (getCXXABI().classifyReturnType(FI)) {
    if (FI.getReturnInfo().isIndirect())
      markAllocatedGPRs(1, 1);
  } else {
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType(),
                                            FI.isVariadic());
  }
  for (auto &I : FI.arguments()) {
    unsigned PreAllocationVFPs = AllocatedVFPs;
    unsigned PreAllocationGPRs = AllocatedGPRs;
    bool IsCPRC = false;
    // 6.1.2.3 There is one VFP co-processor register class using registers
    // s0-s15 (d0-d7) for passing arguments.
    I.info = classifyArgumentType(I.type, FI.isVariadic(), IsCPRC);

    // If we have allocated some arguments onto the stack (due to running
    // out of VFP registers), we cannot split an argument between GPRs and
    // the stack. If this situation occurs, we add padding to prevent the
    // GPRs from being used. In this situation, the current argument could
    // only be allocated by rule C.8, so rule C.6 would mark these GPRs as
    // unavailable anyway.
    // We do not have to do this if the argument is being passed ByVal, as the
    // backend can handle that situation correctly.
    const bool StackUsed = PreAllocationGPRs > NumGPRs ||
                           PreAllocationVFPs > NumVFPs;
    const bool IsByVal = I.info.isIndirect() && I.info.getIndirectByVal();
    if (!IsCPRC && PreAllocationGPRs < NumGPRs && AllocatedGPRs > NumGPRs &&
        StackUsed && !IsByVal) {
      llvm::Type *PaddingTy = llvm::ArrayType::get(
          llvm::Type::getInt32Ty(getVMContext()), NumGPRs - PreAllocationGPRs);
      if (I.info.canHaveCoerceToType()) {
        I.info = ABIArgInfo::getDirect(I.info.getCoerceToType() /* type */,
                                       0 /* offset */, PaddingTy);
      } else {
        I.info = ABIArgInfo::getDirect(nullptr /* type */, 0 /* offset */,
                                       PaddingTy);
      }
    }
  }

  // Always honor user-specified calling convention.
  if (FI.getCallingConvention() != llvm::CallingConv::C)
    return;

  llvm::CallingConv::ID cc = getRuntimeCC();
  if (cc != llvm::CallingConv::C)
    FI.setEffectiveCallingConvention(cc);
}
/// Return the default calling convention that LLVM will use.
llvm::CallingConv::ID ARMABIInfo::getLLVMDefaultCC() const {
  // The default calling convention that LLVM will infer.
  if (isEABIHF())
    return llvm::CallingConv::ARM_AAPCS_VFP;
  else if (isEABI())
    return llvm::CallingConv::ARM_AAPCS;
  else
    return llvm::CallingConv::ARM_APCS;
}

/// Return the calling convention that our ABI would like us to use
/// as the C calling convention.
llvm::CallingConv::ID ARMABIInfo::getABIDefaultCC() const {
  switch (getABIKind()) {
  case APCS: return llvm::CallingConv::ARM_APCS;
  case AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  }
  llvm_unreachable("bad ABI kind");
}

void ARMABIInfo::setRuntimeCC() {
  assert(getRuntimeCC() == llvm::CallingConv::C);

  // Don't muddy up the IR with a ton of explicit annotations if
  // they'd just match what LLVM will infer from the triple.
  llvm::CallingConv::ID abiCC = getABIDefaultCC();
  if (abiCC != getLLVMDefaultCC())
    RuntimeCC = abiCC;
}
/// isHomogeneousAggregate - Return true if a type is an AAPCS-VFP homogeneous
/// aggregate.  If HAMembers is non-null, the number of base elements
/// contained in the type is returned through it; this is used for the
/// recursive calls that check aggregate component types.
static bool isHomogeneousAggregate(QualType Ty, const Type *&Base,
                                   ASTContext &Context, uint64_t *HAMembers) {
  uint64_t Members = 0;
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    if (!isHomogeneousAggregate(AT->getElementType(), Base, Context, &Members))
      return false;
    Members *= AT->getSize().getZExtValue();
  } else if (const RecordType *RT = Ty->getAs<RecordType>()) {
    const RecordDecl *RD = RT->getDecl();
    if (RD->hasFlexibleArrayMember())
      return false;

    Members = 0;
    for (const auto *FD : RD->fields()) {
      uint64_t FldMembers;
      if (!isHomogeneousAggregate(FD->getType(), Base, Context, &FldMembers))
        return false;

      Members = (RD->isUnion() ?
                 std::max(Members, FldMembers) : Members + FldMembers);
    }
  } else {
    Members = 1;
    if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
      Members = 2;
      Ty = CT->getElementType();
    }

    // Homogeneous aggregates for AAPCS-VFP must have base types of float,
    // double, or 64-bit or 128-bit vectors.
    if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
      if (BT->getKind() != BuiltinType::Float &&
          BT->getKind() != BuiltinType::Double &&
          BT->getKind() != BuiltinType::LongDouble)
        return false;
    } else if (const VectorType *VT = Ty->getAs<VectorType>()) {
      unsigned VecSize = Context.getTypeSize(VT);
      if (VecSize != 64 && VecSize != 128)
        return false;
    } else {
      return false;
    }

    // The base type must be the same for all members.  Vector types of the
    // same total size are treated as being equivalent here.
    const Type *TyPtr = Ty.getTypePtr();
    if (!Base)
      Base = TyPtr;

    if (Base != TyPtr) {
      // Homogeneous aggregates are defined as containing members with the
      // same machine type. There are two cases in which two members have
      // different TypePtrs but the same machine type:
      //
      // 1) Vectors of the same length, regardless of the type and number
      //    of their members.
      const bool SameLengthVectors =
          Base->isVectorType() && TyPtr->isVectorType() &&
          (Context.getTypeSize(Base) == Context.getTypeSize(TyPtr));

      // 2) In the 32-bit AAPCS, `double' and `long double' have the same
      //    machine type. This is not the case for the 64-bit AAPCS.
      const bool SameSizeDoubles =
          ((Base->isSpecificBuiltinType(BuiltinType::Double) &&
            TyPtr->isSpecificBuiltinType(BuiltinType::LongDouble)) ||
           (Base->isSpecificBuiltinType(BuiltinType::LongDouble) &&
            TyPtr->isSpecificBuiltinType(BuiltinType::Double))) &&
          (Context.getTypeSize(Base) == Context.getTypeSize(TyPtr));

      if (!SameLengthVectors && !SameSizeDoubles)
        return false;
    }
  }

  // Homogeneous Aggregates can have at most 4 members of the base type.
  if (HAMembers)
    *HAMembers = Members;

  return (Members > 0 && Members <= 4);
}
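
// Illustrative examples (not in the original source):
//   struct HA1 { float f[4]; };          // HA: Base = float, Members = 4
//   struct HA2 { double d; double e; };  // HA: Base = double, Members = 2
//   struct No1 { float f[5]; };          // not an HA: more than 4 members
//   struct No2 { float f; double d; };   // not an HA: mixed base types
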
/// markAllocatedVFPs - update VFPRegs according to the alignment and
/// number of VFP registers (unit is S register) requested.
void ARMABIInfo::markAllocatedVFPs(unsigned Alignment,
                                   unsigned NumRequired) const {
  // Early exit.
  if (AllocatedVFPs >= 16) {
    // We use AllocatedVFP > 16 to signal that some CPRCs were allocated on
    // the stack.
    AllocatedVFPs = 17;
    return;
  }
  // C.1.vfp If the argument is a VFP CPRC and there are sufficient consecutive
  // VFP registers of the appropriate type unallocated then the argument is
  // allocated to the lowest-numbered sequence of such registers.
  for (unsigned I = 0; I < 16; I += Alignment) {
    bool FoundSlot = true;
    for (unsigned J = I, JEnd = I + NumRequired; J < JEnd; J++)
      if (J >= 16 || VFPRegs[J]) {
        FoundSlot = false;
        break;
      }
    if (FoundSlot) {
      for (unsigned J = I, JEnd = I + NumRequired; J < JEnd; J++)
        VFPRegs[J] = 1;
      AllocatedVFPs += NumRequired;
      return;
    }
  }
  // C.2.vfp If the argument is a VFP CPRC then any VFP registers that are
  // unallocated are marked as unavailable.
  for (unsigned I = 0; I < 16; I++)
    VFPRegs[I] = 1;
  AllocatedVFPs = 17; // We do not have enough VFP registers.
}
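
// Back-filling example (illustrative): for f(float a, double b, float c)
// under AAPCS-VFP, 'a' takes s0; 'b' needs an even-aligned pair and takes
// s2-s3 (d1); 'c' then back-fills the hole in s1, because the scan above
// always restarts from the lowest-numbered free register.
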
/// Update AllocatedGPRs to record the number of general-purpose registers
/// which have been allocated. It is valid for AllocatedGPRs to go above 4;
/// this represents arguments being stored on the stack.
void ARMABIInfo::markAllocatedGPRs(unsigned Alignment,
                                   unsigned NumRequired) const {
  assert((Alignment == 1 || Alignment == 2) && "Alignment must be 4 or 8 bytes");

  if (Alignment == 2 && AllocatedGPRs & 0x1)
    AllocatedGPRs += 1;

  AllocatedGPRs += NumRequired;
}

void ARMABIInfo::resetAllocatedRegs(void) const {
  AllocatedGPRs = 0;
  AllocatedVFPs = 0;
  for (unsigned i = 0; i < NumVFPs; ++i)
    VFPRegs[i] = 0;
}
ABIArgInfo ARMABIInfo::classifyArgumentType(QualType Ty, bool isVariadic,
                                            bool &IsCPRC) const {
  // We update the number of allocated VFPs according to
  // 6.1.2.1 The following argument types are VFP CPRCs:
  //   A single-precision floating-point type (including promoted
  //   half-precision types); A double-precision floating-point type;
  //   A 64-bit or 128-bit containerized vector type; Homogeneous Aggregate
  //   with a Base Type of a single- or double-precision floating-point type,
  //   64-bit containerized vectors or 128-bit containerized vectors with one
  //   to four Elements.

  // Handle illegal vector types here.
  if (isIllegalVectorType(Ty)) {
    uint64_t Size = getContext().getTypeSize(Ty);
    if (Size <= 32) {
      llvm::Type *ResType =
          llvm::Type::getInt32Ty(getVMContext());
      markAllocatedGPRs(1, 1);
      return ABIArgInfo::getDirect(ResType);
    }
    if (Size == 64) {
      llvm::Type *ResType = llvm::VectorType::get(
          llvm::Type::getInt32Ty(getVMContext()), 2);
      if (getABIKind() == ARMABIInfo::AAPCS || isVariadic) {
        markAllocatedGPRs(2, 2);
      } else {
        markAllocatedVFPs(2, 2);
        IsCPRC = true;
      }
      return ABIArgInfo::getDirect(ResType);
    }
    if (Size == 128) {
      llvm::Type *ResType = llvm::VectorType::get(
          llvm::Type::getInt32Ty(getVMContext()), 4);
      if (getABIKind() == ARMABIInfo::AAPCS || isVariadic) {
        markAllocatedGPRs(2, 4);
      } else {
        markAllocatedVFPs(4, 4);
        IsCPRC = true;
      }
      return ABIArgInfo::getDirect(ResType);
    }
    markAllocatedGPRs(1, 1);
    return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
  }
  // Update VFPRegs for legal vector types.
  if (getABIKind() == ARMABIInfo::AAPCS_VFP && !isVariadic) {
    if (const VectorType *VT = Ty->getAs<VectorType>()) {
      uint64_t Size = getContext().getTypeSize(VT);
      // Size of a legal vector should be a power of 2 and at least 64.
      markAllocatedVFPs(Size >= 128 ? 4 : 2, Size / 32);
      IsCPRC = true;
    }
  }
  // Update VFPRegs for floating-point types.
  if (getABIKind() == ARMABIInfo::AAPCS_VFP && !isVariadic) {
    if (const BuiltinType *BT = Ty->getAs<BuiltinType>()) {
      if (BT->getKind() == BuiltinType::Half ||
          BT->getKind() == BuiltinType::Float) {
        markAllocatedVFPs(1, 1);
        IsCPRC = true;
      }
      if (BT->getKind() == BuiltinType::Double ||
          BT->getKind() == BuiltinType::LongDouble) {
        markAllocatedVFPs(2, 2);
        IsCPRC = true;
      }
    }
  }
  if (!isAggregateTypeForABI(Ty)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = Ty->getAs<EnumType>()) {
      Ty = EnumTy->getDecl()->getIntegerType();
    }

    unsigned Size = getContext().getTypeSize(Ty);
    if (!IsCPRC)
      markAllocatedGPRs(Size > 32 ? 2 : 1, (Size + 31) / 32);
    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
    markAllocatedGPRs(1, 1);
    return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
  }

  // Ignore empty records.
  if (isEmptyRecord(getContext(), Ty, true))
    return ABIArgInfo::getIgnore();

  if (getABIKind() == ARMABIInfo::AAPCS_VFP && !isVariadic) {
    // Homogeneous Aggregates need to be expanded when we can fit the aggregate
    // into VFP registers.
    const Type *Base = nullptr;
    uint64_t Members = 0;
    if (isHomogeneousAggregate(Ty, Base, getContext(), &Members)) {
      assert(Base && "Base class should be set for homogeneous aggregate");
      // Base can be a floating-point type or a vector.
      if (Base->isVectorType()) {
        // ElementSize is in number of floats.
        unsigned ElementSize = getContext().getTypeSize(Base) == 64 ? 2 : 4;
        markAllocatedVFPs(ElementSize, Members * ElementSize);
      } else if (Base->isSpecificBuiltinType(BuiltinType::Float))
        markAllocatedVFPs(1, Members);
      else {
        assert(Base->isSpecificBuiltinType(BuiltinType::Double) ||
               Base->isSpecificBuiltinType(BuiltinType::LongDouble));
        markAllocatedVFPs(2, Members * 2);
      }
      IsCPRC = true;
      return ABIArgInfo::getDirect();
    }
  }

  // Support byval for ARM.
  // The ABI alignment for APCS is 4-byte and for AAPCS at least 4-byte and at
  // most 8-byte. We realign the indirect argument if type alignment is bigger
  // than ABI alignment.
  uint64_t ABIAlign = 4;
  uint64_t TyAlign = getContext().getTypeAlign(Ty) / 8;
  if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
      getABIKind() == ARMABIInfo::AAPCS)
    ABIAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
  if (getContext().getTypeSizeInChars(Ty) > CharUnits::fromQuantity(64)) {
    // Update Allocated GPRs. Since this is only used when the size of the
    // argument is greater than 64 bytes, this will always use up any available
    // registers (of which there are 4). We also don't care about getting the
    // alignment right, because general-purpose registers cannot be back-filled.
    markAllocatedGPRs(1, 4);
    return ABIArgInfo::getIndirect(TyAlign, /*ByVal=*/true,
                                   /*Realign=*/TyAlign > ABIAlign);
  }

  // Otherwise, pass by coercing to a structure of the appropriate size.
  llvm::Type* ElemTy;
  unsigned SizeRegs;
  // FIXME: Try to match the types of the arguments more accurately where
  // possible.
  if (getContext().getTypeAlign(Ty) <= 32) {
    ElemTy = llvm::Type::getInt32Ty(getVMContext());
    SizeRegs = (getContext().getTypeSize(Ty) + 31) / 32;
    markAllocatedGPRs(1, SizeRegs);
  } else {
    ElemTy = llvm::Type::getInt64Ty(getVMContext());
    SizeRegs = (getContext().getTypeSize(Ty) + 63) / 64;
    markAllocatedGPRs(2, SizeRegs * 2);
  }

  llvm::Type *STy =
      llvm::StructType::get(llvm::ArrayType::get(ElemTy, SizeRegs), NULL);
  return ABIArgInfo::getDirect(STy);
}
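
// Example (illustrative): struct { int a, b, c; } has 4-byte alignment, so
// ElemTy is i32 and SizeRegs is 3; the argument is coerced to { [3 x i32] }
// and marked as consuming three GPRs (spilling to the stack as needed).
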
static bool isIntegerLikeType(QualType Ty, ASTContext &Context,
                              llvm::LLVMContext &VMContext) {
  // APCS, C Language Calling Conventions, Non-Simple Return Values: A structure
  // is called integer-like if its size is less than or equal to one word, and
  // the offset of each of its addressable sub-fields is zero.

  uint64_t Size = Context.getTypeSize(Ty);

  // Check that the type fits in a word.
  if (Size > 32)
    return false;

  // FIXME: Handle vector types!
  if (Ty->isVectorType())
    return false;

  // Float types are never treated as "integer like".
  if (Ty->isRealFloatingType())
    return false;

  // If this is a builtin or pointer type then it is ok.
  if (Ty->getAs<BuiltinType>() || Ty->isPointerType())
    return true;

  // Small complex integer types are "integer like".
  if (const ComplexType *CT = Ty->getAs<ComplexType>())
    return isIntegerLikeType(CT->getElementType(), Context, VMContext);

  // Single element and zero sized arrays should be allowed, by the definition
  // above, but they are not.

  // Otherwise, it must be a record type.
  const RecordType *RT = Ty->getAs<RecordType>();
  if (!RT) return false;

  // Ignore records with flexible arrays.
  const RecordDecl *RD = RT->getDecl();
  if (RD->hasFlexibleArrayMember())
    return false;

  // Check that all sub-fields are at offset 0, and are themselves "integer
  // like".
  const ASTRecordLayout &Layout = Context.getASTRecordLayout(RD);

  bool HadField = false;
  unsigned idx = 0;
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i, ++idx) {
    const FieldDecl *FD = *i;

    // Bit-fields are not addressable, we only need to verify they are "integer
    // like". We still have to disallow a subsequent non-bitfield, for example:
    //   struct { int : 0; int x }
    // is non-integer like according to gcc.
    if (FD->isBitField()) {
      if (!RD->isUnion())
        HadField = true;

      if (!isIntegerLikeType(FD->getType(), Context, VMContext))
        return false;

      continue;
    }

    // Check if this field is at offset 0.
    if (Layout.getFieldOffset(idx) != 0)
      return false;

    if (!isIntegerLikeType(FD->getType(), Context, VMContext))
      return false;

    // Only allow at most one field in a structure. This doesn't match the
    // wording above, but follows gcc in situations with a field following an
    // empty structure.
    if (!RD->isUnion()) {
      if (HadField)
        return false;

      HadField = true;
    }
  }

  return true;
}
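
// Illustrative examples (not from the original source), for APCS returns:
//   struct S1 { short s; };       // integer-like: single field at offset 0
//   union  U1 { int i; char c; }; // integer-like: union fields share offset 0
//   struct S2 { char a, b; };     // not integer-like: 'b' is at offset 1
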
ABIArgInfo ARMABIInfo::classifyReturnType(QualType RetTy,
                                          bool isVariadic) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // Large vector types should be returned via memory.
  if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 128) {
    markAllocatedGPRs(1, 1);
    return ABIArgInfo::getIndirect(0);
  }

  if (!isAggregateTypeForABI(RetTy)) {
    // Treat an enum type as its underlying type.
    if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
      RetTy = EnumTy->getDecl()->getIntegerType();

    return (RetTy->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }

  // Are we following APCS?
  if (getABIKind() == APCS) {
    if (isEmptyRecord(getContext(), RetTy, false))
      return ABIArgInfo::getIgnore();

    // Complex types are all returned as packed integers.
    //
    // FIXME: Consider using 2 x vector types if the back end handles them
    // correctly.
    if (RetTy->isAnyComplexType())
      return ABIArgInfo::getDirect(llvm::IntegerType::get(getVMContext(),
                                       getContext().getTypeSize(RetTy)));

    // Integer-like structures are returned in r0.
    if (isIntegerLikeType(RetTy, getContext(), getVMContext())) {
      // Return in the smallest viable integer type.
      uint64_t Size = getContext().getTypeSize(RetTy);
      if (Size <= 8)
        return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
      if (Size <= 16)
        return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
    }

    // Otherwise return in memory.
    markAllocatedGPRs(1, 1);
    return ABIArgInfo::getIndirect(0);
  }

  // Otherwise this is an AAPCS variant.

  if (isEmptyRecord(getContext(), RetTy, true))
    return ABIArgInfo::getIgnore();

  // Check for homogeneous aggregates with AAPCS-VFP.
  if (getABIKind() == AAPCS_VFP && !isVariadic) {
    const Type *Base = nullptr;
    if (isHomogeneousAggregate(RetTy, Base, getContext())) {
      assert(Base && "Base class should be set for homogeneous aggregate");
      // Homogeneous Aggregates are returned directly.
      return ABIArgInfo::getDirect();
    }
  }

  // Aggregates <= 4 bytes are returned in r0; other aggregates
  // are returned indirectly.
  uint64_t Size = getContext().getTypeSize(RetTy);
  if (Size <= 32) {
    if (getDataLayout().isBigEndian())
      // Return in a 32-bit integer type (as if loaded by LDR, AAPCS 5.4).
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));

    // Return in the smallest viable integer type.
    if (Size <= 8)
      return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
    if (Size <= 16)
      return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
    return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
  }

  markAllocatedGPRs(1, 1);
  return ABIArgInfo::getIndirect(0);
}
/// isIllegalVector - check whether Ty is an illegal vector type.
bool ARMABIInfo::isIllegalVectorType(QualType Ty) const {
  if (const VectorType *VT = Ty->getAs<VectorType>()) {
    // Check whether VT is legal.
    unsigned NumElements = VT->getNumElements();
    uint64_t Size = getContext().getTypeSize(VT);
    // NumElements should be a power of 2.
    if ((NumElements & (NumElements - 1)) != 0)
      return true;
    // Size should be greater than 32 bits.
    return Size <= 32;
  }
  return false;
}
llvm::Value *ARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                   CodeGenFunction &CGF) const {
  llvm::Type *BP = CGF.Int8PtrTy;
  llvm::Type *BPP = CGF.Int8PtrPtrTy;

  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");

  if (isEmptyRecord(getContext(), Ty, true)) {
    // These are ignored for parameter passing purposes.
    llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
    return Builder.CreateBitCast(Addr, PTy);
  }

  uint64_t Size = CGF.getContext().getTypeSize(Ty) / 8;
  uint64_t TyAlign = CGF.getContext().getTypeAlign(Ty) / 8;
  bool IsIndirect = false;

  // The ABI alignment for 64-bit or 128-bit vectors is 8 for AAPCS and 4 for
  // APCS. For AAPCS, the ABI alignment is at least 4-byte and at most 8-byte.
  if (getABIKind() == ARMABIInfo::AAPCS_VFP ||
      getABIKind() == ARMABIInfo::AAPCS)
    TyAlign = std::min(std::max(TyAlign, (uint64_t)4), (uint64_t)8);
  else
    TyAlign = 4;

  // Use indirect if size of the illegal vector is bigger than 16 bytes.
  if (isIllegalVectorType(Ty) && Size > 16) {
    IsIndirect = true;
    Size = 4;
    TyAlign = 4;
  }

  // Handle address alignment for ABI alignment > 4 bytes.
  if (TyAlign > 4) {
    assert((TyAlign & (TyAlign - 1)) == 0 &&
           "Alignment is not power of 2!");
    llvm::Value *AddrAsInt = Builder.CreatePtrToInt(Addr, CGF.Int32Ty);
    AddrAsInt = Builder.CreateAdd(AddrAsInt, Builder.getInt32(TyAlign - 1));
    AddrAsInt = Builder.CreateAnd(AddrAsInt, Builder.getInt32(~(TyAlign - 1)));
    Addr = Builder.CreateIntToPtr(AddrAsInt, BP, "ap.align");
  }

  uint64_t Offset =
      llvm::RoundUpToAlignment(Size, 4);
  llvm::Value *NextAddr =
      Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                        "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  if (IsIndirect)
    Addr = Builder.CreateLoad(Builder.CreateBitCast(Addr, BPP));
  else if (TyAlign < CGF.getContext().getTypeAlign(Ty) / 8) {
    // We can't directly cast ap.cur to a pointer to a vector type, since
    // ap.cur may not be correctly aligned for the vector type. We create an
    // aligned temporary space and copy the content over from ap.cur to the
    // temporary space. This is necessary if the natural alignment of the type
    // is greater than the ABI alignment.
    llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
    CharUnits CharSize = getContext().getTypeSizeInChars(Ty);
    llvm::Value *AlignedTemp = CGF.CreateTempAlloca(CGF.ConvertType(Ty),
                                                    "var.align");
    llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
    llvm::Value *Src = Builder.CreateBitCast(Addr, I8PtrTy);
    Builder.CreateMemCpy(Dst, Src,
        llvm::ConstantInt::get(CGF.IntPtrTy, CharSize.getQuantity()),
        TyAlign, false);
    Addr = AlignedTemp; // The content is in the aligned location.
  }
  llvm::Type *PTy =
      llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  return AddrTyped;
}

namespace {
class NaClARMABIInfo : public ABIInfo {
 public:
  NaClARMABIInfo(CodeGen::CodeGenTypes &CGT, ARMABIInfo::ABIKind Kind)
      : ABIInfo(CGT), PInfo(CGT), NInfo(CGT, Kind) {}
  void computeInfo(CGFunctionInfo &FI) const override;
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
 private:
  PNaClABIInfo PInfo; // Used for generating calls with pnaclcall callingconv.
  ARMABIInfo NInfo; // Used for everything else.
};

class NaClARMTargetCodeGenInfo : public TargetCodeGenInfo {
 public:
  NaClARMTargetCodeGenInfo(CodeGen::CodeGenTypes &CGT, ARMABIInfo::ABIKind Kind)
      : TargetCodeGenInfo(new NaClARMABIInfo(CGT, Kind)) {}
};

}

void NaClARMABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (FI.getASTCallingConvention() == CC_PnaclCall)
    PInfo.computeInfo(FI);
  else
    static_cast<const ABIInfo&>(NInfo).computeInfo(FI);
}

llvm::Value *NaClARMABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                       CodeGenFunction &CGF) const {
  // Always use the native convention; calling pnacl-style varargs functions
  // is unsupported.
  return static_cast<const ABIInfo&>(NInfo).EmitVAArg(VAListAddr, Ty, CGF);
}
//===----------------------------------------------------------------------===//
// NVPTX ABI Implementation
//===----------------------------------------------------------------------===//

namespace {
class NVPTXABIInfo : public ABIInfo {
public:
  NVPTXABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType Ty) const;

  void computeInfo(CGFunctionInfo &FI) const override;
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CFG) const override;
};

class NVPTXTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  NVPTXTargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new NVPTXABIInfo(CGT)) {}

  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;

private:
  // Adds a NamedMDNode with F, Name, and Operand as operands, and adds the
  // resulting MDNode to the nvvm.annotations MDNode.
  static void addNVVMMetadata(llvm::Function *F, StringRef Name, int Operand);
};
ABIArgInfo NVPTXABIInfo::classifyReturnType(QualType RetTy) const {
  if (RetTy->isVoidType())
    return ABIArgInfo::getIgnore();

  // note: this is different from the default ABI
  if (!RetTy->isScalarType())
    return ABIArgInfo::getDirect();

  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
    RetTy = EnumTy->getDecl()->getIntegerType();

  return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

ABIArgInfo NVPTXABIInfo::classifyArgumentType(QualType Ty) const {
  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  return (Ty->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}

void NVPTXABIInfo::computeInfo(CGFunctionInfo &FI) const {
  if (!getCXXABI().classifyReturnType(FI))
    FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
  for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type);

  // Always honor user-specified calling convention.
  if (FI.getCallingConvention() != llvm::CallingConv::C)
    return;

  FI.setEffectiveCallingConvention(getRuntimeCC());
}

llvm::Value *NVPTXABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                                     CodeGenFunction &CFG) const {
  llvm_unreachable("NVPTX does not support varargs");
}
void NVPTXTargetCodeGenInfo::
SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                    CodeGen::CodeGenModule &M) const {
  const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
  if (!FD) return;

  llvm::Function *F = cast<llvm::Function>(GV);

  // Perform special handling in OpenCL mode.
  if (M.getLangOpts().OpenCL) {
    // Use OpenCL function attributes to check for kernel functions.
    // By default, all functions are device functions.
    if (FD->hasAttr<OpenCLKernelAttr>()) {
      // OpenCL __kernel functions get kernel metadata.
      // Create !{<func-ref>, metadata !"kernel", i32 1} node.
      addNVVMMetadata(F, "kernel", 1);
      // And kernel functions are not subject to inlining.
      F->addFnAttr(llvm::Attribute::NoInline);
    }
  }

  // Perform special handling in CUDA mode.
  if (M.getLangOpts().CUDA) {
    // CUDA __global__ functions get a kernel metadata entry. Since
    // __global__ functions cannot be called from the device, we do not
    // need to set the noinline attribute.
    if (FD->hasAttr<CUDAGlobalAttr>()) {
      // Create !{<func-ref>, metadata !"kernel", i32 1} node.
      addNVVMMetadata(F, "kernel", 1);
    }
    if (FD->hasAttr<CUDALaunchBoundsAttr>()) {
      // Create !{<func-ref>, metadata !"maxntidx", i32 <val>} node.
      addNVVMMetadata(F, "maxntidx",
                      FD->getAttr<CUDALaunchBoundsAttr>()->getMaxThreads());
      // min blocks is a default argument for CUDALaunchBoundsAttr, so getting
      // a zero value from getMinBlocks either means it was not specified in
      // __launch_bounds__ or the user specified a 0 value. In both cases, we
      // don't have to add a PTX directive.
      int MinCTASM = FD->getAttr<CUDALaunchBoundsAttr>()->getMinBlocks();
      if (MinCTASM > 0) {
        // Create !{<func-ref>, metadata !"minctasm", i32 <val>} node.
        addNVVMMetadata(F, "minctasm", MinCTASM);
      }
    }
  }
}
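
// For example (illustrative, not in the original): a kernel declared as
//   __global__ __launch_bounds__(256, 2) void k(...);
// would receive the annotations !{<k>, !"maxntidx", i32 256} and
// !{<k>, !"minctasm", i32 2} through the calls above.
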
void NVPTXTargetCodeGenInfo::addNVVMMetadata(llvm::Function *F, StringRef Name,
                                             int Operand) {
  llvm::Module *M = F->getParent();
  llvm::LLVMContext &Ctx = M->getContext();

  // Get the "nvvm.annotations" metadata node.
  llvm::NamedMDNode *MD = M->getOrInsertNamedMetadata("nvvm.annotations");

  llvm::Value *MDVals[] = {
      F, llvm::MDString::get(Ctx, Name),
      llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), Operand)};
  // Append metadata to nvvm.annotations.
  MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
}
}
//===----------------------------------------------------------------------===//
// SystemZ ABI Implementation
//===----------------------------------------------------------------------===//

namespace {
class SystemZABIInfo : public ABIInfo {
public:
  SystemZABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

  bool isPromotableIntegerType(QualType Ty) const;
  bool isCompoundType(QualType Ty) const;
  bool isFPArgumentType(QualType Ty) const;

  ABIArgInfo classifyReturnType(QualType RetTy) const;
  ABIArgInfo classifyArgumentType(QualType ArgTy) const;

  void computeInfo(CGFunctionInfo &FI) const override {
    if (!getCXXABI().classifyReturnType(FI))
      FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
    for (auto &I : FI.arguments())
      I.info = classifyArgumentType(I.type);
  }

  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};

class SystemZTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SystemZTargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new SystemZABIInfo(CGT)) {}
};

}
bool SystemZABIInfo::isPromotableIntegerType(QualType Ty) const {
  // Treat an enum type as its underlying type.
  if (const EnumType *EnumTy = Ty->getAs<EnumType>())
    Ty = EnumTy->getDecl()->getIntegerType();

  // Promotable integer types are required to be promoted by the ABI.
  if (Ty->isPromotableIntegerType())
    return true;

  // 32-bit values must also be promoted.
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
    switch (BT->getKind()) {
    case BuiltinType::Int:
    case BuiltinType::UInt:
      return true;
    default:
      return false;
    }
  return false;
}

bool SystemZABIInfo::isCompoundType(QualType Ty) const {
  return Ty->isAnyComplexType() || isAggregateTypeForABI(Ty);
}

bool SystemZABIInfo::isFPArgumentType(QualType Ty) const {
  if (const BuiltinType *BT = Ty->getAs<BuiltinType>())
    switch (BT->getKind()) {
    case BuiltinType::Float:
    case BuiltinType::Double:
      return true;
    default:
      return false;
    }

  if (const RecordType *RT = Ty->getAsStructureType()) {
    const RecordDecl *RD = RT->getDecl();
    bool Found = false;

    // If this is a C++ record, check the bases first.
    if (const CXXRecordDecl *CXXRD = dyn_cast<CXXRecordDecl>(RD))
      for (const auto &I : CXXRD->bases()) {
        QualType Base = I.getType();

        // Empty bases don't affect things either way.
        if (isEmptyRecord(getContext(), Base, true))
          continue;

        if (Found)
          return false;
        Found = isFPArgumentType(Base);
        if (!Found)
          return false;
      }

    // Check the fields.
    for (const auto *FD : RD->fields()) {
      // Empty bitfields don't affect things either way.
      // Unlike isSingleElementStruct(), empty structure and array fields
      // do count.  So do anonymous bitfields that aren't zero-sized.
      if (FD->isBitField() && FD->getBitWidthValue(getContext()) == 0)
        continue;

      // Unlike isSingleElementStruct(), arrays do not count.
      // Nested isFPArgumentType structures still do though.
      if (Found)
        return false;
      Found = isFPArgumentType(FD->getType());
      if (!Found)
        return false;
    }

    // Unlike isSingleElementStruct(), trailing padding is allowed.
    // An 8-byte aligned struct s { float f; } is passed as a double.
    return Found;
  }

  return false;
}
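
// Example (illustrative, not in the original): struct { float f; } and a
// struct wrapping a single double both satisfy isFPArgumentType and are
// passed in an FPR; struct { float f; int i; } does not, because the second
// non-empty field trips the 'if (Found) return false' check above.
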
5080 llvm::Value *SystemZABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
5081 CodeGenFunction &CGF) const {
5082 // Assume that va_list type is correct; should be pointer to LLVM type:
5086 // i8 *__overflow_arg_area;
5087 // i8 *__reg_save_area;
5090 // Every argument occupies 8 bytes and is passed by preference in either
5092 Ty = CGF.getContext().getCanonicalType(Ty);
5093 ABIArgInfo AI = classifyArgumentType(Ty);
5094 bool InFPRs = isFPArgumentType(Ty);
5096 llvm::Type *APTy = llvm::PointerType::getUnqual(CGF.ConvertTypeForMem(Ty));
5097 bool IsIndirect = AI.isIndirect();
5098 unsigned UnpaddedBitSize;
5100 APTy = llvm::PointerType::getUnqual(APTy);
5101 UnpaddedBitSize = 64;
5103 UnpaddedBitSize = getContext().getTypeSize(Ty);
5104 unsigned PaddedBitSize = 64;
5105 assert((UnpaddedBitSize <= PaddedBitSize) && "Invalid argument size.");
5107 unsigned PaddedSize = PaddedBitSize / 8;
5108 unsigned Padding = (PaddedBitSize - UnpaddedBitSize) / 8;
5110 unsigned MaxRegs, RegCountField, RegSaveIndex, RegPadding;
5112 MaxRegs = 4; // Maximum of 4 FPR arguments
5113 RegCountField = 1; // __fpr
5114 RegSaveIndex = 16; // save offset for f0
5115 RegPadding = 0; // floats are passed in the high bits of an FPR
5117 MaxRegs = 5; // Maximum of 5 GPR arguments
5118 RegCountField = 0; // __gpr
5119 RegSaveIndex = 2; // save offset for r2
5120 RegPadding = Padding; // values are passed in the low bits of a GPR
5123 llvm::Value *RegCountPtr =
5124 CGF.Builder.CreateStructGEP(VAListAddr, RegCountField, "reg_count_ptr");
5125 llvm::Value *RegCount = CGF.Builder.CreateLoad(RegCountPtr, "reg_count");
5126 llvm::Type *IndexTy = RegCount->getType();
5127 llvm::Value *MaxRegsV = llvm::ConstantInt::get(IndexTy, MaxRegs);
5128 llvm::Value *InRegs = CGF.Builder.CreateICmpULT(RegCount, MaxRegsV,
5131 llvm::BasicBlock *InRegBlock = CGF.createBasicBlock("vaarg.in_reg");
5132 llvm::BasicBlock *InMemBlock = CGF.createBasicBlock("vaarg.in_mem");
5133 llvm::BasicBlock *ContBlock = CGF.createBasicBlock("vaarg.end");
5134 CGF.Builder.CreateCondBr(InRegs, InRegBlock, InMemBlock);
5136 // Emit code to load the value if it was passed in registers.
5137 CGF.EmitBlock(InRegBlock);
5139 // Work out the address of an argument register.
5140 llvm::Value *PaddedSizeV = llvm::ConstantInt::get(IndexTy, PaddedSize);
5141 llvm::Value *ScaledRegCount =
5142 CGF.Builder.CreateMul(RegCount, PaddedSizeV, "scaled_reg_count");
5143 llvm::Value *RegBase =
5144 llvm::ConstantInt::get(IndexTy, RegSaveIndex * PaddedSize + RegPadding);
5145 llvm::Value *RegOffset =
5146 CGF.Builder.CreateAdd(ScaledRegCount, RegBase, "reg_offset");
5147 llvm::Value *RegSaveAreaPtr =
5148 CGF.Builder.CreateStructGEP(VAListAddr, 3, "reg_save_area_ptr");
5149 llvm::Value *RegSaveArea =
5150 CGF.Builder.CreateLoad(RegSaveAreaPtr, "reg_save_area");
5151 llvm::Value *RawRegAddr =
5152 CGF.Builder.CreateGEP(RegSaveArea, RegOffset, "raw_reg_addr");
5153 llvm::Value *RegAddr =
5154 CGF.Builder.CreateBitCast(RawRegAddr, APTy, "reg_addr");
5156 // Update the register count
5157 llvm::Value *One = llvm::ConstantInt::get(IndexTy, 1);
5158 llvm::Value *NewRegCount =
5159 CGF.Builder.CreateAdd(RegCount, One, "reg_count");
5160 CGF.Builder.CreateStore(NewRegCount, RegCountPtr);
5161 CGF.EmitBranch(ContBlock);
5163 // Emit code to load the value if it was passed in memory.
5164 CGF.EmitBlock(InMemBlock);
5166 // Work out the address of a stack argument.
5167 llvm::Value *OverflowArgAreaPtr =
5168 CGF.Builder.CreateStructGEP(VAListAddr, 2, "overflow_arg_area_ptr");
5169 llvm::Value *OverflowArgArea =
5170 CGF.Builder.CreateLoad(OverflowArgAreaPtr, "overflow_arg_area");
5171 llvm::Value *PaddingV = llvm::ConstantInt::get(IndexTy, Padding);
5172 llvm::Value *RawMemAddr =
5173 CGF.Builder.CreateGEP(OverflowArgArea, PaddingV, "raw_mem_addr");
5174 llvm::Value *MemAddr =
5175 CGF.Builder.CreateBitCast(RawMemAddr, APTy, "mem_addr");
  // Update the overflow_arg_area pointer.
5178 llvm::Value *NewOverflowArgArea =
5179 CGF.Builder.CreateGEP(OverflowArgArea, PaddedSizeV, "overflow_arg_area");
5180 CGF.Builder.CreateStore(NewOverflowArgArea, OverflowArgAreaPtr);
5181 CGF.EmitBranch(ContBlock);
5183 // Return the appropriate result.
5184 CGF.EmitBlock(ContBlock);
5185 llvm::PHINode *ResAddr = CGF.Builder.CreatePHI(APTy, 2, "va_arg.addr");
5186 ResAddr->addIncoming(RegAddr, InRegBlock);
5187 ResAddr->addIncoming(MemAddr, InMemBlock);
  if (IsIndirect)
    return CGF.Builder.CreateLoad(ResAddr, "indirect_arg");

  return ResAddr;
}
5195 ABIArgInfo SystemZABIInfo::classifyReturnType(QualType RetTy) const {
5196 if (RetTy->isVoidType())
5197 return ABIArgInfo::getIgnore();
5198 if (isCompoundType(RetTy) || getContext().getTypeSize(RetTy) > 64)
5199 return ABIArgInfo::getIndirect(0);
5200 return (isPromotableIntegerType(RetTy) ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
5204 ABIArgInfo SystemZABIInfo::classifyArgumentType(QualType Ty) const {
5205 // Handle the generic C++ ABI.
5206 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
5207 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
5209 // Integers and enums are extended to full register width.
5210 if (isPromotableIntegerType(Ty))
5211 return ABIArgInfo::getExtend();
5213 // Values that are not 1, 2, 4 or 8 bytes in size are passed indirectly.
5214 uint64_t Size = getContext().getTypeSize(Ty);
5215 if (Size != 8 && Size != 16 && Size != 32 && Size != 64)
5216 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
5218 // Handle small structures.
5219 if (const RecordType *RT = Ty->getAs<RecordType>()) {
    // Structures with flexible arrays have variable length, so they
    // should really fail the size test above.
5222 const RecordDecl *RD = RT->getDecl();
5223 if (RD->hasFlexibleArrayMember())
5224 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
    // The structure is passed as an unextended integer, a float, or a double.
    llvm::Type *PassTy;
    if (isFPArgumentType(Ty)) {
      assert(Size == 32 || Size == 64);
      if (Size == 32)
        PassTy = llvm::Type::getFloatTy(getVMContext());
      else
        PassTy = llvm::Type::getDoubleTy(getVMContext());
    } else
      PassTy = llvm::IntegerType::get(getVMContext(), Size);
    return ABIArgInfo::getDirect(PassTy);
  }
5239 // Non-structure compounds are passed indirectly.
5240 if (isCompoundType(Ty))
5241 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
  return ABIArgInfo::getDirect(nullptr);
}
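
// Illustrative classifications under the rules above (editor's sketch, not
// normative ABI text):
//   signed char             -> Extend   (integers are promoted to 64 bits)
//   struct { float f; }     -> Direct(float), via isFPArgumentType()
//   struct { short a, b; }  -> Direct(i32), an unextended integer
//   struct { char c[3]; }   -> Indirect (24 bits is not 8/16/32/64)
//   _Complex double         -> Indirect (non-structure compound type)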
5246 //===----------------------------------------------------------------------===//
5247 // MSP430 ABI Implementation
5248 //===----------------------------------------------------------------------===//
class MSP430TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  MSP430TargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new DefaultABIInfo(CGT)) {}
  void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
};
5262 void MSP430TargetCodeGenInfo::SetTargetAttributes(const Decl *D,
5263 llvm::GlobalValue *GV,
5264 CodeGen::CodeGenModule &M) const {
5265 if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
5266 if (const MSP430InterruptAttr *attr = FD->getAttr<MSP430InterruptAttr>()) {
5267 // Handle 'interrupt' attribute:
5268 llvm::Function *F = cast<llvm::Function>(GV);
5270 // Step 1: Set ISR calling convention.
5271 F->setCallingConv(llvm::CallingConv::MSP430_INTR);
      // Step 2: Add the 'noinline' attribute.
      F->addFnAttr(llvm::Attribute::NoInline);
5276 // Step 3: Emit ISR vector alias.
5277 unsigned Num = attr->getNumber() / 2;
      llvm::GlobalAlias::create(llvm::Function::ExternalLinkage,
                                "__isr_" + Twine(Num), F);
    }
  }
}
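
// For illustration (hypothetical user code, editor's sketch): a handler
// declared as
//   void __attribute__((interrupt(4))) watchdog_isr(void);
// receives the MSP430_INTR calling convention, is marked noinline, and gets
// an ExternalLinkage alias named "__isr_2" (vector number 4 divided by 2)
// that downstream tooling can place in the interrupt vector table.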
5284 //===----------------------------------------------------------------------===//
5285 // MIPS ABI Implementation. This works for both little-endian and
5286 // big-endian variants.
5287 //===----------------------------------------------------------------------===//
class MipsABIInfo : public ABIInfo {
  bool IsO32;
  unsigned MinABIStackAlignInBytes, StackAlignInBytes;
  void CoerceToIntArgs(uint64_t TySize,
                       SmallVectorImpl<llvm::Type *> &ArgList) const;
  llvm::Type* HandleAggregates(QualType Ty, uint64_t TySize) const;
  llvm::Type* returnAggregateInRegs(QualType RetTy, uint64_t Size) const;
  llvm::Type* getPaddingType(uint64_t OrigOffset, uint64_t Offset) const;

public:
  MipsABIInfo(CodeGenTypes &CGT, bool _IsO32) :
    ABIInfo(CGT), IsO32(_IsO32), MinABIStackAlignInBytes(IsO32 ? 4 : 8),
    StackAlignInBytes(IsO32 ? 8 : 16) {}
5303 ABIArgInfo classifyReturnType(QualType RetTy) const;
5304 ABIArgInfo classifyArgumentType(QualType RetTy, uint64_t &Offset) const;
5305 void computeInfo(CGFunctionInfo &FI) const override;
5306 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};
class MIPSTargetCodeGenInfo : public TargetCodeGenInfo {
  unsigned SizeOfUnwindException;
public:
  MIPSTargetCodeGenInfo(CodeGenTypes &CGT, bool IsO32)
    : TargetCodeGenInfo(new MipsABIInfo(CGT, IsO32)),
      SizeOfUnwindException(IsO32 ? 24 : 32) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &CGM) const override {
    return 29;
  }
5321 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
5322 CodeGen::CodeGenModule &CGM) const override {
    const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
    if (!FD) return;
    llvm::Function *Fn = cast<llvm::Function>(GV);
    if (FD->hasAttr<Mips16Attr>()) {
      Fn->addFnAttr("mips16");
    }
    else if (FD->hasAttr<NoMips16Attr>()) {
      Fn->addFnAttr("nomips16");
    }
  }
5334 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
5335 llvm::Value *Address) const override;
5337 unsigned getSizeOfUnwindException() const override {
    return SizeOfUnwindException;
  }
};
5343 void MipsABIInfo::CoerceToIntArgs(uint64_t TySize,
5344 SmallVectorImpl<llvm::Type *> &ArgList) const {
5345 llvm::IntegerType *IntTy =
5346 llvm::IntegerType::get(getVMContext(), MinABIStackAlignInBytes * 8);
5348 // Add (TySize / MinABIStackAlignInBytes) args of IntTy.
5349 for (unsigned N = TySize / (MinABIStackAlignInBytes * 8); N; --N)
5350 ArgList.push_back(IntTy);
5352 // If necessary, add one more integer type to ArgList.
  unsigned R = TySize % (MinABIStackAlignInBytes * 8);
  if (R)
    ArgList.push_back(llvm::IntegerType::get(getVMContext(), R));
}
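
// Worked example (editor's sketch): for a 72-bit type, O32
// (MinABIStackAlignInBytes == 4) produces { i32, i32, i8 }, while N32/64
// (MinABIStackAlignInBytes == 8) produces { i64, i8 }.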
// In N32/64, an aligned double precision floating point field is passed in
// a register.
5361 llvm::Type* MipsABIInfo::HandleAggregates(QualType Ty, uint64_t TySize) const {
  SmallVector<llvm::Type*, 8> ArgList, IntArgList;

  if (IsO32) {
    CoerceToIntArgs(TySize, ArgList);
    return llvm::StructType::get(getVMContext(), ArgList);
  }
5369 if (Ty->isComplexType())
5370 return CGT.ConvertType(Ty);
5372 const RecordType *RT = Ty->getAs<RecordType>();
5374 // Unions/vectors are passed in integer registers.
5375 if (!RT || !RT->isStructureOrClassType()) {
5376 CoerceToIntArgs(TySize, ArgList);
5377 return llvm::StructType::get(getVMContext(), ArgList);
5380 const RecordDecl *RD = RT->getDecl();
5381 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
5382 assert(!(TySize % 8) && "Size of structure must be multiple of 8.");
  uint64_t LastOffset = 0;
  unsigned idx = 0;
  llvm::IntegerType *I64 = llvm::IntegerType::get(getVMContext(), 64);

  // Iterate over fields in the struct/class and check if there are any
  // aligned double fields.
  for (RecordDecl::field_iterator i = RD->field_begin(), e = RD->field_end();
       i != e; ++i, ++idx) {
    const QualType Ty = i->getType();
    const BuiltinType *BT = Ty->getAs<BuiltinType>();

    if (!BT || BT->getKind() != BuiltinType::Double)
      continue;

    uint64_t Offset = Layout.getFieldOffset(idx);
    if (Offset % 64) // Ignore doubles that are not aligned.
      continue;

    // Add ((Offset - LastOffset) / 64) args of type i64.
    for (unsigned j = (Offset - LastOffset) / 64; j > 0; --j)
      ArgList.push_back(I64);

    // Add the double field.
    ArgList.push_back(llvm::Type::getDoubleTy(getVMContext()));
    LastOffset = Offset + 64;
  }
5411 CoerceToIntArgs(TySize - LastOffset, IntArgList);
5412 ArgList.append(IntArgList.begin(), IntArgList.end());
  return llvm::StructType::get(getVMContext(), ArgList);
}
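
// Worked example (editor's sketch): under N64,
//   struct S { double d; int i; };   // 128 bits, 'd' aligned at offset 0
// first appends the double, leaving LastOffset == 64; the trailing
// CoerceToIntArgs(128 - 64, ...) then appends one i64, so S is passed
// directly as the coercion type { double, i64 }.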
5417 llvm::Type *MipsABIInfo::getPaddingType(uint64_t OrigOffset,
5418 uint64_t Offset) const {
  if (OrigOffset + MinABIStackAlignInBytes > Offset)
    return nullptr;

  return llvm::IntegerType::get(getVMContext(), (Offset - OrigOffset) * 8);
}
ABIArgInfo
MipsABIInfo::classifyArgumentType(QualType Ty, uint64_t &Offset) const {
5427 uint64_t OrigOffset = Offset;
5428 uint64_t TySize = getContext().getTypeSize(Ty);
5429 uint64_t Align = getContext().getTypeAlign(Ty) / 8;
5431 Align = std::min(std::max(Align, (uint64_t)MinABIStackAlignInBytes),
5432 (uint64_t)StackAlignInBytes);
5433 unsigned CurrOffset = llvm::RoundUpToAlignment(Offset, Align);
5434 Offset = CurrOffset + llvm::RoundUpToAlignment(TySize, Align * 8) / 8;
5436 if (isAggregateTypeForABI(Ty) || Ty->isVectorType()) {
    // Ignore empty aggregates.
    if (TySize == 0)
      return ABIArgInfo::getIgnore();
5441 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI())) {
5442 Offset = OrigOffset + MinABIStackAlignInBytes;
      return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
    }
5446 // If we have reached here, aggregates are passed directly by coercing to
5447 // another structure type. Padding is inserted if the offset of the
5448 // aggregate is unaligned.
5449 return ABIArgInfo::getDirect(HandleAggregates(Ty, TySize), 0,
                                 getPaddingType(OrigOffset, CurrOffset));
  }
5453 // Treat an enum type as its underlying type.
5454 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
5455 Ty = EnumTy->getDecl()->getIntegerType();
5457 if (Ty->isPromotableIntegerType())
5458 return ABIArgInfo::getExtend();
5460 return ABIArgInfo::getDirect(
      nullptr, 0, IsO32 ? nullptr : getPaddingType(OrigOffset, CurrOffset));
}
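
// Padding example (editor's sketch): on N64, if the running Offset is 8 and
// the next argument requires 16-byte alignment, CurrOffset becomes 16 and
// getPaddingType(8, 16) returns i64, inserting 8 bytes of padding so later
// arguments stay in their expected registers.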
llvm::Type*
MipsABIInfo::returnAggregateInRegs(QualType RetTy, uint64_t Size) const {
5466 const RecordType *RT = RetTy->getAs<RecordType>();
5467 SmallVector<llvm::Type*, 8> RTList;
5469 if (RT && RT->isStructureOrClassType()) {
5470 const RecordDecl *RD = RT->getDecl();
5471 const ASTRecordLayout &Layout = getContext().getASTRecordLayout(RD);
5472 unsigned FieldCnt = Layout.getFieldCount();
5474 // N32/64 returns struct/classes in floating point registers if the
5475 // following conditions are met:
5476 // 1. The size of the struct/class is no larger than 128-bit.
  // 2. The struct/class has one or two fields all of which are floating
  //    point types.
5479 // 3. The offset of the first field is zero (this follows what gcc does).
5481 // Any other composite results are returned in integer registers.
5483 if (FieldCnt && (FieldCnt <= 2) && !Layout.getFieldOffset(0)) {
5484 RecordDecl::field_iterator b = RD->field_begin(), e = RD->field_end();
5485 for (; b != e; ++b) {
5486 const BuiltinType *BT = b->getType()->getAs<BuiltinType>();
        if (!BT || !BT->isFloatingPoint())
          break;

        RTList.push_back(CGT.ConvertType(b->getType()));
      }

      if (b == e)
        return llvm::StructType::get(getVMContext(), RTList,
                                     RD->hasAttr<PackedAttr>());

      RTList.clear();
    }
  }
5502 CoerceToIntArgs(Size, RTList);
  return llvm::StructType::get(getVMContext(), RTList);
}
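
// For instance (editor's reading of the conditions above): under N32/64,
//   struct R { float f; double d; };
// satisfies all three conditions and is returned in FPRs as { float, double },
// whereas struct M { float f; int i; } fails condition 2 and is returned
// through the integer coercion path instead.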
5506 ABIArgInfo MipsABIInfo::classifyReturnType(QualType RetTy) const {
5507 uint64_t Size = getContext().getTypeSize(RetTy);
5509 if (RetTy->isVoidType() || Size == 0)
5510 return ABIArgInfo::getIgnore();
  if (isAggregateTypeForABI(RetTy) || RetTy->isVectorType()) {
    if (Size <= 128) {
      if (RetTy->isAnyComplexType())
        return ABIArgInfo::getDirect();

      // O32 returns integer vectors in registers.
      if (IsO32 && RetTy->isVectorType() && !RetTy->hasFloatingRepresentation())
        return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));

      // N32/64 returns all small aggregates in registers.
      if (!IsO32)
        return ABIArgInfo::getDirect(returnAggregateInRegs(RetTy, Size));
    }

    return ABIArgInfo::getIndirect(0);
  }
5528 // Treat an enum type as its underlying type.
5529 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
5530 RetTy = EnumTy->getDecl()->getIntegerType();
5532 return (RetTy->isPromotableIntegerType() ?
          ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
}
5536 void MipsABIInfo::computeInfo(CGFunctionInfo &FI) const {
5537 ABIArgInfo &RetInfo = FI.getReturnInfo();
5538 if (!getCXXABI().classifyReturnType(FI))
5539 RetInfo = classifyReturnType(FI.getReturnType());
5541 // Check if a pointer to an aggregate is passed as a hidden argument.
5542 uint64_t Offset = RetInfo.isIndirect() ? MinABIStackAlignInBytes : 0;
5544 for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type, Offset);
}
5548 llvm::Value* MipsABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
5549 CodeGenFunction &CGF) const {
5550 llvm::Type *BP = CGF.Int8PtrTy;
5551 llvm::Type *BPP = CGF.Int8PtrPtrTy;
5553 CGBuilderTy &Builder = CGF.Builder;
5554 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
5555 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
5556 int64_t TypeAlign = getContext().getTypeAlign(Ty) / 8;
5557 llvm::Type *PTy = llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
5558 llvm::Value *AddrTyped;
5559 unsigned PtrWidth = getTarget().getPointerWidth(0);
5560 llvm::IntegerType *IntTy = (PtrWidth == 32) ? CGF.Int32Ty : CGF.Int64Ty;
5562 if (TypeAlign > MinABIStackAlignInBytes) {
5563 llvm::Value *AddrAsInt = CGF.Builder.CreatePtrToInt(Addr, IntTy);
5564 llvm::Value *Inc = llvm::ConstantInt::get(IntTy, TypeAlign - 1);
5565 llvm::Value *Mask = llvm::ConstantInt::get(IntTy, -TypeAlign);
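    // (Editor's note) The Add/And pair below is the standard power-of-two
    // round-up, (Addr + TypeAlign - 1) & -TypeAlign; e.g. with TypeAlign == 8
    // an ap.cur of 0x1004 is bumped to 0x1008.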
5566 llvm::Value *Add = CGF.Builder.CreateAdd(AddrAsInt, Inc);
5567 llvm::Value *And = CGF.Builder.CreateAnd(Add, Mask);
5568 AddrTyped = CGF.Builder.CreateIntToPtr(And, PTy);
  } else
    AddrTyped = Builder.CreateBitCast(Addr, PTy);
5573 llvm::Value *AlignedAddr = Builder.CreateBitCast(AddrTyped, BP);
  TypeAlign = std::max((unsigned)TypeAlign, MinABIStackAlignInBytes);
  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, TypeAlign);
  llvm::Value *NextAddr =
    Builder.CreateGEP(AlignedAddr, llvm::ConstantInt::get(IntTy, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}
bool
MIPSTargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
5587 llvm::Value *Address) const {
  // This information comes from gcc's implementation, which seems to be
  // as canonical as it gets.
5591 // Everything on MIPS is 4 bytes. Double-precision FP registers
5592 // are aliased to pairs of single-precision FP registers.
5593 llvm::Value *Four8 = llvm::ConstantInt::get(CGF.Int8Ty, 4);
5595 // 0-31 are the general purpose registers, $0 - $31.
5596 // 32-63 are the floating-point registers, $f0 - $f31.
5597 // 64 and 65 are the multiply/divide registers, $hi and $lo.
5598 // 66 is the (notional, I think) register for signal-handler return.
5599 AssignToArrayRange(CGF.Builder, Address, Four8, 0, 65);
5601 // 67-74 are the floating-point status registers, $fcc0 - $fcc7.
5602 // They are one bit wide and ignored here.
5604 // 80-111 are the coprocessor 0 registers, $c0r0 - $c0r31.
5605 // (coprocessor 1 is the FP unit)
5606 // 112-143 are the coprocessor 2 registers, $c2r0 - $c2r31.
5607 // 144-175 are the coprocessor 3 registers, $c3r0 - $c3r31.
5608 // 176-181 are the DSP accumulator registers.
  AssignToArrayRange(CGF.Builder, Address, Four8, 80, 181);

  return false;
}
5613 //===----------------------------------------------------------------------===//
5614 // TCE ABI Implementation (see http://tce.cs.tut.fi). Uses mostly the defaults.
// Currently subclassed only to implement custom OpenCL C function attribute
// handling.
//===----------------------------------------------------------------------===//
class TCETargetCodeGenInfo : public DefaultTargetCodeGenInfo {
public:
  TCETargetCodeGenInfo(CodeGenTypes &CGT)
    : DefaultTargetCodeGenInfo(CGT) {}
5626 void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                           CodeGen::CodeGenModule &M) const override;
};
5630 void TCETargetCodeGenInfo::SetTargetAttributes(const Decl *D,
5631 llvm::GlobalValue *GV,
5632 CodeGen::CodeGenModule &M) const {
  const FunctionDecl *FD = dyn_cast<FunctionDecl>(D);
  if (!FD) return;

  llvm::Function *F = cast<llvm::Function>(GV);
5638 if (M.getLangOpts().OpenCL) {
5639 if (FD->hasAttr<OpenCLKernelAttr>()) {
5640 // OpenCL C Kernel functions are not subject to inlining
5641 F->addFnAttr(llvm::Attribute::NoInline);
      const ReqdWorkGroupSizeAttr *Attr = FD->getAttr<ReqdWorkGroupSizeAttr>();
      if (Attr) {
        // Convert the reqd_work_group_size() attributes to metadata.
5645 llvm::LLVMContext &Context = F->getContext();
5646 llvm::NamedMDNode *OpenCLMetadata =
5647 M.getModule().getOrInsertNamedMetadata("opencl.kernel_wg_size_info");
5649 SmallVector<llvm::Value*, 5> Operands;
5650 Operands.push_back(F);
5652 Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
5653 llvm::APInt(32, Attr->getXDim())));
5654 Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
5655 llvm::APInt(32, Attr->getYDim())));
5656 Operands.push_back(llvm::Constant::getIntegerValue(M.Int32Ty,
5657 llvm::APInt(32, Attr->getZDim())));
5659 // Add a boolean constant operand for "required" (true) or "hint" (false)
5660 // for implementing the work_group_size_hint attr later. Currently
5661 // always true as the hint is not yet implemented.
5662 Operands.push_back(llvm::ConstantInt::getTrue(Context));
        OpenCLMetadata->addOperand(llvm::MDNode::get(Context, Operands));
      }
    }
  }
}
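
// As an illustration (hypothetical kernel, editor's sketch): for
//   __kernel __attribute__((reqd_work_group_size(8, 4, 1))) void k(void) {}
// this emits roughly the following module-level metadata:
//   !opencl.kernel_wg_size_info = !{!0}
//   !0 = metadata !{void ()* @k, i32 8, i32 4, i32 1, i1 true}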
5671 //===----------------------------------------------------------------------===//
5672 // Hexagon ABI Implementation
5673 //===----------------------------------------------------------------------===//
class HexagonABIInfo : public ABIInfo {
public:
  HexagonABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

private:
  ABIArgInfo classifyReturnType(QualType RetTy) const;
5686 ABIArgInfo classifyArgumentType(QualType RetTy) const;
5688 void computeInfo(CGFunctionInfo &FI) const override;
5690 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};
class HexagonTargetCodeGenInfo : public TargetCodeGenInfo {
public:
  HexagonTargetCodeGenInfo(CodeGenTypes &CGT)
    :TargetCodeGenInfo(new HexagonABIInfo(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 29;
  }
};
5706 void HexagonABIInfo::computeInfo(CGFunctionInfo &FI) const {
5707 if (!getCXXABI().classifyReturnType(FI))
5708 FI.getReturnInfo() = classifyReturnType(FI.getReturnType());
5709 for (auto &I : FI.arguments())
    I.info = classifyArgumentType(I.type);
}
5713 ABIArgInfo HexagonABIInfo::classifyArgumentType(QualType Ty) const {
5714 if (!isAggregateTypeForABI(Ty)) {
5715 // Treat an enum type as its underlying type.
5716 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
5717 Ty = EnumTy->getDecl()->getIntegerType();
    return (Ty->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }
5723 // Ignore empty records.
5724 if (isEmptyRecord(getContext(), Ty, true))
5725 return ABIArgInfo::getIgnore();
5727 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
5728 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
  uint64_t Size = getContext().getTypeSize(Ty);
  if (Size > 64)
    return ABIArgInfo::getIndirect(0, /*ByVal=*/true);

  // Pass in the smallest viable integer type.
  if (Size > 32)
    return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
  if (Size > 16)
    return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
  if (Size > 8)
    return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
  return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
}
5744 ABIArgInfo HexagonABIInfo::classifyReturnType(QualType RetTy) const {
5745 if (RetTy->isVoidType())
5746 return ABIArgInfo::getIgnore();
5748 // Large vector types should be returned via memory.
5749 if (RetTy->isVectorType() && getContext().getTypeSize(RetTy) > 64)
5750 return ABIArgInfo::getIndirect(0);
5752 if (!isAggregateTypeForABI(RetTy)) {
5753 // Treat an enum type as its underlying type.
5754 if (const EnumType *EnumTy = RetTy->getAs<EnumType>())
5755 RetTy = EnumTy->getDecl()->getIntegerType();
    return (RetTy->isPromotableIntegerType() ?
            ABIArgInfo::getExtend() : ABIArgInfo::getDirect());
  }
5761 if (isEmptyRecord(getContext(), RetTy, true))
5762 return ABIArgInfo::getIgnore();
5764 // Aggregates <= 8 bytes are returned in r0; other aggregates
5765 // are returned indirectly.
  uint64_t Size = getContext().getTypeSize(RetTy);
  if (Size <= 64) {
    // Return in the smallest viable integer type.
    if (Size <= 8)
      return ABIArgInfo::getDirect(llvm::Type::getInt8Ty(getVMContext()));
    if (Size <= 16)
      return ABIArgInfo::getDirect(llvm::Type::getInt16Ty(getVMContext()));
    if (Size <= 32)
      return ABIArgInfo::getDirect(llvm::Type::getInt32Ty(getVMContext()));
    return ABIArgInfo::getDirect(llvm::Type::getInt64Ty(getVMContext()));
  }

  return ABIArgInfo::getIndirect(0, /*ByVal=*/true);
}
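
// Putting the two classifiers together (editor's sketch): a 3-byte
// struct { char a, b, c; } is widened and passed/returned as i32, while a
// 12-byte struct exceeds 64 bits and so is passed indirectly (byval) and
// returned via a hidden sret pointer.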
5781 llvm::Value *HexagonABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
5782 CodeGenFunction &CGF) const {
5783 // FIXME: Need to handle alignment
5784 llvm::Type *BPP = CGF.Int8PtrPtrTy;
  CGBuilderTy &Builder = CGF.Builder;
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP,
                                                       "ap");
  llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
  llvm::Type *PTy =
    llvm::PointerType::getUnqual(CGF.ConvertType(Ty));
  llvm::Value *AddrTyped = Builder.CreateBitCast(Addr, PTy);

  uint64_t Offset =
    llvm::RoundUpToAlignment(CGF.getContext().getTypeSize(Ty) / 8, 4);
  llvm::Value *NextAddr =
    Builder.CreateGEP(Addr, llvm::ConstantInt::get(CGF.Int32Ty, Offset),
                      "ap.next");
  Builder.CreateStore(NextAddr, VAListAddrAsBPP);

  return AddrTyped;
}
5805 //===----------------------------------------------------------------------===//
5806 // SPARC v9 ABI Implementation.
5807 // Based on the SPARC Compliance Definition version 2.4.1.
// Function arguments are mapped to a nominal "parameter array" and promoted to
// registers depending on their type. Each argument occupies 8 or 16 bytes in
// the array, structs larger than 16 bytes are passed indirectly.
// One case requires special care:
//
//   struct mixed {
//     int i;
//     float f;
//   };
//
// When a struct mixed is passed by value, it only occupies 8 bytes in the
5821 // parameter array, but the int is passed in an integer register, and the float
5822 // is passed in a floating point register. This is represented as two arguments
5823 // with the LLVM IR inreg attribute:
5825 // declare void f(i32 inreg %i, float inreg %f)
5827 // The code generator will only allocate 4 bytes from the parameter array for
// the inreg arguments. All other arguments are allocated a multiple of 8
// bytes.

namespace {
class SparcV9ABIInfo : public ABIInfo {
public:
  SparcV9ABIInfo(CodeGenTypes &CGT) : ABIInfo(CGT) {}

private:
  ABIArgInfo classifyType(QualType RetTy, unsigned SizeLimit) const;
5838 void computeInfo(CGFunctionInfo &FI) const override;
5839 llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
5840 CodeGenFunction &CGF) const override;
5842 // Coercion type builder for structs passed in registers. The coercion type
5843 // serves two purposes:
  // 1. Pad structs to a multiple of 64 bits, so they are passed 'left-aligned'
  //    in registers.
5847 // 2. Expose aligned floating point elements as first-level elements, so the
5848 // code generator knows to pass them in floating point registers.
5850 // We also compute the InReg flag which indicates that the struct contains
5851 // aligned 32-bit floats.
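  //
  // For example (editor's sketch): struct { int i; float f; } is coerced to
  // the literal type { i32, float } with InReg set, and lowers to
  //   declare void @f(i32 inreg %i, float inreg %f)
  // exactly like the 'struct mixed' example in the header comment above.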
5853 struct CoerceBuilder {
5854 llvm::LLVMContext &Context;
5855 const llvm::DataLayout &DL;
    SmallVector<llvm::Type*, 8> Elems;
    uint64_t Size;
    bool InReg;
5860 CoerceBuilder(llvm::LLVMContext &c, const llvm::DataLayout &dl)
5861 : Context(c), DL(dl), Size(0), InReg(false) {}
    // Pad Elems with integers until Size is ToSize.
    void pad(uint64_t ToSize) {
      assert(ToSize >= Size && "Cannot remove elements");
      if (ToSize == Size)
        return;

      // Finish the current 64-bit word.
      uint64_t Aligned = llvm::RoundUpToAlignment(Size, 64);
      if (Aligned > Size && Aligned <= ToSize) {
        Elems.push_back(llvm::IntegerType::get(Context, Aligned - Size));
        Size = Aligned;
      }

      // Add whole 64-bit words.
      while (Size + 64 <= ToSize) {
        Elems.push_back(llvm::Type::getInt64Ty(Context));
        Size += 64;
      }

      // Final in-word padding.
      if (Size < ToSize) {
        Elems.push_back(llvm::IntegerType::get(Context, ToSize - Size));
        Size = ToSize;
      }
    }
    // Add a floating point element at Offset.
    void addFloat(uint64_t Offset, llvm::Type *Ty, unsigned Bits) {
      // Unaligned floats are treated as integers.
      if (Offset % Bits)
        return;
      // The InReg flag is only required if there are any floats < 64 bits.
      if (Bits < 64)
        InReg = true;
      pad(Offset);
      Elems.push_back(Ty);
      Size = Offset + Bits;
    }
    // Add a struct type to the coercion type, starting at Offset (in bits).
    void addStruct(uint64_t Offset, llvm::StructType *StrTy) {
      const llvm::StructLayout *Layout = DL.getStructLayout(StrTy);
      for (unsigned i = 0, e = StrTy->getNumElements(); i != e; ++i) {
        llvm::Type *ElemTy = StrTy->getElementType(i);
        uint64_t ElemOffset = Offset + Layout->getElementOffsetInBits(i);
        switch (ElemTy->getTypeID()) {
        case llvm::Type::StructTyID:
          addStruct(ElemOffset, cast<llvm::StructType>(ElemTy));
          break;
        case llvm::Type::FloatTyID:
          addFloat(ElemOffset, ElemTy, 32);
          break;
        case llvm::Type::DoubleTyID:
          addFloat(ElemOffset, ElemTy, 64);
          break;
        case llvm::Type::FP128TyID:
          addFloat(ElemOffset, ElemTy, 128);
          break;
        case llvm::Type::PointerTyID:
          if (ElemOffset % 64 == 0) {
            pad(ElemOffset);
            Elems.push_back(ElemTy);
            Size = ElemOffset + 64;
          }
          break;
        default:
          break;
        }
      }
    }
    // Check if Ty is a usable substitute for the coercion type.
    bool isUsableType(llvm::StructType *Ty) const {
      if (Ty->getNumElements() != Elems.size())
        return false;
      for (unsigned i = 0, e = Elems.size(); i != e; ++i)
        if (Elems[i] != Ty->getElementType(i))
          return false;
      return true;
    }
5944 // Get the coercion type as a literal struct type.
5945 llvm::Type *getType() const {
5946 if (Elems.size() == 1)
5947 return Elems.front();
      return llvm::StructType::get(Context, Elems);
    }
  };
};
5953 } // end anonymous namespace
ABIArgInfo
SparcV9ABIInfo::classifyType(QualType Ty, unsigned SizeLimit) const {
5957 if (Ty->isVoidType())
5958 return ABIArgInfo::getIgnore();
5960 uint64_t Size = getContext().getTypeSize(Ty);
5962 // Anything too big to fit in registers is passed with an explicit indirect
5963 // pointer / sret pointer.
5964 if (Size > SizeLimit)
5965 return ABIArgInfo::getIndirect(0, /*ByVal=*/false);
5967 // Treat an enum type as its underlying type.
5968 if (const EnumType *EnumTy = Ty->getAs<EnumType>())
5969 Ty = EnumTy->getDecl()->getIntegerType();
5971 // Integer types smaller than a register are extended.
5972 if (Size < 64 && Ty->isIntegerType())
5973 return ABIArgInfo::getExtend();
5975 // Other non-aggregates go in registers.
5976 if (!isAggregateTypeForABI(Ty))
5977 return ABIArgInfo::getDirect();
5979 // If a C++ object has either a non-trivial copy constructor or a non-trivial
5980 // destructor, it is passed with an explicit indirect pointer / sret pointer.
5981 if (CGCXXABI::RecordArgABI RAA = getRecordArgABI(Ty, getCXXABI()))
5982 return ABIArgInfo::getIndirect(0, RAA == CGCXXABI::RAA_DirectInMemory);
5984 // This is a small aggregate type that should be passed in registers.
5985 // Build a coercion type from the LLVM struct type.
  llvm::StructType *StrTy = dyn_cast<llvm::StructType>(CGT.ConvertType(Ty));
  if (!StrTy)
    return ABIArgInfo::getDirect();
5990 CoerceBuilder CB(getVMContext(), getDataLayout());
5991 CB.addStruct(0, StrTy);
5992 CB.pad(llvm::RoundUpToAlignment(CB.DL.getTypeSizeInBits(StrTy), 64));
  // Try to use the original type for coercion.
  llvm::Type *CoerceTy = CB.isUsableType(StrTy) ? StrTy : CB.getType();

  if (CB.InReg)
    return ABIArgInfo::getDirectInReg(CoerceTy);
  else
    return ABIArgInfo::getDirect(CoerceTy);
}
6003 llvm::Value *SparcV9ABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
6004 CodeGenFunction &CGF) const {
6005 ABIArgInfo AI = classifyType(Ty, 16 * 8);
6006 llvm::Type *ArgTy = CGT.ConvertType(Ty);
6007 if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
6008 AI.setCoerceToType(ArgTy);
6010 llvm::Type *BPP = CGF.Int8PtrPtrTy;
6011 CGBuilderTy &Builder = CGF.Builder;
6012 llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr, BPP, "ap");
6013 llvm::Value *Addr = Builder.CreateLoad(VAListAddrAsBPP, "ap.cur");
6014 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
  llvm::Value *ArgAddr;
  unsigned Stride;
6018 switch (AI.getKind()) {
6019 case ABIArgInfo::Expand:
6020 case ABIArgInfo::InAlloca:
6021 llvm_unreachable("Unsupported ABI kind for va_arg");
  case ABIArgInfo::Extend:
    Stride = 8;
    ArgAddr = Builder
      .CreateConstGEP1_32(Addr, 8 - getDataLayout().getTypeAllocSize(ArgTy),
                          "extend");
    break;

  case ABIArgInfo::Direct:
    Stride = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
    ArgAddr = Addr;
    break;
  case ABIArgInfo::Indirect:
    Stride = 8;
    ArgAddr = Builder.CreateBitCast(Addr,
                                    llvm::PointerType::getUnqual(ArgPtrTy),
                                    "indirect");
    ArgAddr = Builder.CreateLoad(ArgAddr, "indirect.arg");
    break;

  case ABIArgInfo::Ignore:
    return llvm::UndefValue::get(ArgPtrTy);
  }
  // Update VAList.
  Addr = Builder.CreateConstGEP1_32(Addr, Stride, "ap.next");
6049 Builder.CreateStore(Addr, VAListAddrAsBPP);
  return Builder.CreatePointerCast(ArgAddr, ArgPtrTy, "arg.addr");
}
6054 void SparcV9ABIInfo::computeInfo(CGFunctionInfo &FI) const {
6055 FI.getReturnInfo() = classifyType(FI.getReturnType(), 32 * 8);
6056 for (auto &I : FI.arguments())
    I.info = classifyType(I.type, 16 * 8);
}
namespace {
class SparcV9TargetCodeGenInfo : public TargetCodeGenInfo {
public:
  SparcV9TargetCodeGenInfo(CodeGenTypes &CGT)
    : TargetCodeGenInfo(new SparcV9ABIInfo(CGT)) {}

  int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const override {
    return 14;
  }
6070 bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                               llvm::Value *Address) const override;
};
6073 } // end anonymous namespace
bool
SparcV9TargetCodeGenInfo::initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
6077 llvm::Value *Address) const {
6078 // This is calculated from the LLVM and GCC tables and verified
6079 // against gcc output. AFAIK all ABIs use the same encoding.
6081 CodeGen::CGBuilderTy &Builder = CGF.Builder;
6083 llvm::IntegerType *i8 = CGF.Int8Ty;
6084 llvm::Value *Four8 = llvm::ConstantInt::get(i8, 4);
6085 llvm::Value *Eight8 = llvm::ConstantInt::get(i8, 8);
6087 // 0-31: the 8-byte general-purpose registers
6088 AssignToArrayRange(Builder, Address, Eight8, 0, 31);
6090 // 32-63: f0-31, the 4-byte floating-point registers
6091 AssignToArrayRange(Builder, Address, Four8, 32, 63);
  //   Y   = 64
  //   PSR = 65
  //   WIM = 66
  //   TBR = 67
  //   PC  = 68
  //   NPC = 69
  //   FSR = 70
  //   CSR = 71
  AssignToArrayRange(Builder, Address, Eight8, 64, 71);
  // 72-87: d0-15, the 8-byte floating-point registers
  AssignToArrayRange(Builder, Address, Eight8, 72, 87);

  return false;
}
6110 //===----------------------------------------------------------------------===//
6111 // XCore ABI Implementation
6112 //===----------------------------------------------------------------------===//
namespace {

/// A SmallStringEnc instance is used to build up the TypeString by passing
/// it by reference between functions that append to it.
6118 typedef llvm::SmallString<128> SmallStringEnc;
6120 /// TypeStringCache caches the meta encodings of Types.
6122 /// The reason for caching TypeStrings is two fold:
6123 /// 1. To cache a type's encoding for later uses;
6124 /// 2. As a means to break recursive member type inclusion.
6126 /// A cache Entry can have a Status of:
6127 /// NonRecursive: The type encoding is not recursive;
6128 /// Recursive: The type encoding is recursive;
6129 /// Incomplete: An incomplete TypeString;
6130 /// IncompleteUsed: An incomplete TypeString that has been used in a
6131 /// Recursive type encoding.
6133 /// A NonRecursive entry will have all of its sub-members expanded as fully
6134 /// as possible. Whilst it may contain types which are recursive, the type
6135 /// itself is not recursive and thus its encoding may be safely used whenever
6136 /// the type is encountered.
6138 /// A Recursive entry will have all of its sub-members expanded as fully as
6139 /// possible. The type itself is recursive and it may contain other types which
6140 /// are recursive. The Recursive encoding must not be used during the expansion
6141 /// of a recursive type's recursive branch. For simplicity the code uses
6142 /// IncompleteCount to reject all usage of Recursive encodings for member types.
6144 /// An Incomplete entry is always a RecordType and only encodes its
6145 /// identifier e.g. "s(S){}". Incomplete 'StubEnc' entries are ephemeral and
6146 /// are placed into the cache during type expansion as a means to identify and
6147 /// handle recursive inclusion of types as sub-members. If there is recursion
6148 /// the entry becomes IncompleteUsed.
6150 /// During the expansion of a RecordType's members:
6152 /// If the cache contains a NonRecursive encoding for the member type, the
6153 /// cached encoding is used;
6155 /// If the cache contains a Recursive encoding for the member type, the
6156 /// cached encoding is 'Swapped' out, as it may be incorrect, and...
6158 /// If the member is a RecordType, an Incomplete encoding is placed into the
6159 /// cache to break potential recursive inclusion of itself as a sub-member;
6161 /// Once a member RecordType has been expanded, its temporary incomplete
6162 /// entry is removed from the cache. If a Recursive encoding was swapped out
6163 /// it is swapped back in;
6165 /// If an incomplete entry is used to expand a sub-member, the incomplete
6166 /// entry is marked as IncompleteUsed. The cache keeps count of how many
6167 /// IncompleteUsed entries it currently contains in IncompleteUsedCount;
6169 /// If a member's encoding is found to be a NonRecursive or Recursive viz:
6170 /// IncompleteUsedCount==0, the member's encoding is added to the cache.
6171 /// Else the member is part of a recursive type and thus the recursion has
6172 /// been exited too soon for the encoding to be correct for the member.
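///
/// For example (editor's sketch): encoding
///   struct S { struct S *next; };
/// first places the incomplete stub "s(S){}" into the cache; expanding the
/// 'next' member finds and uses that stub, marking it IncompleteUsed, so
/// removeIncomplete() reports the finished encoding of S as Recursive.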
class TypeStringCache {
  enum Status {NonRecursive, Recursive, Incomplete, IncompleteUsed};
  struct Entry {
    std::string Str;     // The encoded TypeString for the type.
    enum Status State;   // Information about the encoding in 'Str'.
    std::string Swapped; // A temporary place holder for a Recursive encoding
                         // during the expansion of RecordType's members.
  };
  std::map<const IdentifierInfo *, struct Entry> Map;
  unsigned IncompleteCount;     // Number of Incomplete entries in the Map.
  unsigned IncompleteUsedCount; // Number of IncompleteUsed entries in the Map.
public:
  TypeStringCache() : IncompleteCount(0), IncompleteUsedCount(0) {}
  void addIncomplete(const IdentifierInfo *ID, std::string StubEnc);
  bool removeIncomplete(const IdentifierInfo *ID);
  void addIfComplete(const IdentifierInfo *ID, StringRef Str,
                     bool IsRecursive);
  StringRef lookupStr(const IdentifierInfo *ID);
};
/// TypeString encodings for enum & union fields must be ordered.
6195 /// FieldEncoding is a helper for this ordering process.
class FieldEncoding {
  bool HasName;
  std::string Enc;
public:
  FieldEncoding(bool b, SmallStringEnc &e) : HasName(b), Enc(e.c_str()) {}
  StringRef str() { return Enc.c_str(); }
  bool operator<(const FieldEncoding &rhs) const {
    if (HasName != rhs.HasName) return HasName;
    return Enc < rhs.Enc;
  }
};
class XCoreABIInfo : public DefaultABIInfo {
public:
  XCoreABIInfo(CodeGen::CodeGenTypes &CGT) : DefaultABIInfo(CGT) {}
  llvm::Value *EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
                         CodeGenFunction &CGF) const override;
};
class XCoreTargetCodeGenInfo : public TargetCodeGenInfo {
  mutable TypeStringCache TSC;
public:
  XCoreTargetCodeGenInfo(CodeGenTypes &CGT)
    :TargetCodeGenInfo(new XCoreABIInfo(CGT)) {}
  void emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
                    CodeGen::CodeGenModule &M) const override;
};
6224 } // End anonymous namespace.
6226 llvm::Value *XCoreABIInfo::EmitVAArg(llvm::Value *VAListAddr, QualType Ty,
6227 CodeGenFunction &CGF) const {
  CGBuilderTy &Builder = CGF.Builder;

  // Get the VAList.
  llvm::Value *VAListAddrAsBPP = Builder.CreateBitCast(VAListAddr,
                                                       CGF.Int8PtrPtrTy);
  llvm::Value *AP = Builder.CreateLoad(VAListAddrAsBPP);
6235 // Handle the argument.
6236 ABIArgInfo AI = classifyArgumentType(Ty);
6237 llvm::Type *ArgTy = CGT.ConvertType(Ty);
6238 if (AI.canHaveCoerceToType() && !AI.getCoerceToType())
6239 AI.setCoerceToType(ArgTy);
6240 llvm::Type *ArgPtrTy = llvm::PointerType::getUnqual(ArgTy);
  llvm::Value *Val;
  uint64_t ArgSize = 0;
6243 switch (AI.getKind()) {
6244 case ABIArgInfo::Expand:
6245 case ABIArgInfo::InAlloca:
6246 llvm_unreachable("Unsupported ABI kind for va_arg");
  case ABIArgInfo::Ignore:
    Val = llvm::UndefValue::get(ArgPtrTy);
    ArgSize = 0;
    break;
  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    Val = Builder.CreatePointerCast(AP, ArgPtrTy);
    ArgSize = getDataLayout().getTypeAllocSize(AI.getCoerceToType());
    if (ArgSize < 4)
      ArgSize = 4;
    break;
  case ABIArgInfo::Indirect:
    llvm::Value *ArgAddr;
    ArgAddr = Builder.CreateBitCast(AP, llvm::PointerType::getUnqual(ArgPtrTy));
    ArgAddr = Builder.CreateLoad(ArgAddr);
    Val = Builder.CreatePointerCast(ArgAddr, ArgPtrTy);
    ArgSize = 4;
    break;
  }
  // Increment the VAList.
  if (ArgSize) {
    llvm::Value *APN = Builder.CreateConstGEP1_32(AP, ArgSize);
    Builder.CreateStore(APN, VAListAddrAsBPP);
  }
  return Val;
}
6275 /// During the expansion of a RecordType, an incomplete TypeString is placed
6276 /// into the cache as a means to identify and break recursion.
6277 /// If there is a Recursive encoding in the cache, it is swapped out and will
6278 /// be reinserted by removeIncomplete().
6279 /// All other types of encoding should have been used rather than arriving here.
6280 void TypeStringCache::addIncomplete(const IdentifierInfo *ID,
                                    std::string StubEnc) {
  if (!ID)
    return;
  Entry &E = Map[ID];
  assert( (E.Str.empty() || E.State == Recursive) &&
         "Incorrect use of addIncomplete");
6287 assert(!StubEnc.empty() && "Passing an empty string to addIncomplete()");
6288 E.Swapped.swap(E.Str); // swap out the Recursive
6289 E.Str.swap(StubEnc);
  E.State = Incomplete;
  ++IncompleteCount;
}
6294 /// Once the RecordType has been expanded, the temporary incomplete TypeString
6295 /// must be removed from the cache.
6296 /// If a Recursive was swapped out by addIncomplete(), it will be replaced.
6297 /// Returns true if the RecordType was defined recursively.
bool TypeStringCache::removeIncomplete(const IdentifierInfo *ID) {
  if (!ID)
    return false;
  auto I = Map.find(ID);
  assert(I != Map.end() && "Entry not present");
  Entry &E = I->second;
  assert( (E.State == Incomplete ||
           E.State == IncompleteUsed) &&
         "Entry must be an incomplete type");
  bool IsRecursive = false;
  if (E.State == IncompleteUsed) {
    // We made use of our Incomplete encoding, thus we are recursive.
    IsRecursive = true;
    --IncompleteUsedCount;
  }
  if (E.Swapped.empty())
    Map.erase(I);
  else {
    // Swap the Recursive back.
    E.Swapped.swap(E.Str);
    E.Swapped.clear();
    E.State = Recursive;
  }
  --IncompleteCount;
  return IsRecursive;
}
6325 /// Add the encoded TypeString to the cache only if it is NonRecursive or
6326 /// Recursive (viz: all sub-members were expanded as fully as possible).
void TypeStringCache::addIfComplete(const IdentifierInfo *ID, StringRef Str,
                                    bool IsRecursive) {
  if (!ID || IncompleteUsedCount)
    return; // No key or it is an incomplete sub-type so don't add.
  Entry &E = Map[ID];
  if (IsRecursive && !E.Str.empty()) {
    assert(E.State==Recursive && E.Str.size() == Str.size() &&
           "This is not the same Recursive entry");
    // The parent container was not recursive after all, so we could have used
    // this Recursive sub-member entry after all, but we assumed the worst when
    // we started viz: IncompleteCount!=0.
    return;
  }
  assert(E.Str.empty() && "Entry already present");
  E.Str = Str.str();
  E.State = IsRecursive? Recursive : NonRecursive;
}
6345 /// Return a cached TypeString encoding for the ID. If there isn't one, or we
6346 /// are recursively expanding a type (IncompleteCount != 0) and the cached
6347 /// encoding is Recursive, return an empty StringRef.
StringRef TypeStringCache::lookupStr(const IdentifierInfo *ID) {
  if (!ID)
    return StringRef();   // We have no key.
  auto I = Map.find(ID);
  if (I == Map.end())
    return StringRef();   // We have no encoding.
  Entry &E = I->second;
  if (E.State == Recursive && IncompleteCount)
    return StringRef();   // We don't use Recursive encodings for member types.

  if (E.State == Incomplete) {
    // The incomplete type is being used to break out of recursion.
    E.State = IncompleteUsed;
    ++IncompleteUsedCount;
  }
  return E.Str.c_str();
}
/// The XCore ABI includes a type information section that communicates symbol
/// type information to the linker. The linker uses this information to verify
/// safety/correctness of things such as array bounds and pointers et al.
/// The ABI only requires C (and XC) language modules to emit TypeStrings.
/// This type information (TypeString) is emitted into metadata for all global
/// symbols: definitions, declarations, functions & variables.
6373 /// The TypeString carries type, qualifier, name, size & value details.
6374 /// Please see 'Tools Development Guide' section 2.16.2 for format details:
6375 /// <https://www.xmos.com/download/public/Tools-Development-Guide%28X9114A%29.pdf>
6376 /// The output is tested by test/CodeGen/xcore-stringtype.c.
6378 static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
6379 CodeGen::CodeGenModule &CGM, TypeStringCache &TSC);
6381 /// XCore uses emitTargetMD to emit TypeString metadata for global symbols.
6382 void XCoreTargetCodeGenInfo::emitTargetMD(const Decl *D, llvm::GlobalValue *GV,
6383 CodeGen::CodeGenModule &CGM) const {
  SmallStringEnc Enc;
  if (getTypeString(Enc, D, CGM, TSC)) {
6386 llvm::LLVMContext &Ctx = CGM.getModule().getContext();
6387 llvm::SmallVector<llvm::Value *, 2> MDVals;
6388 MDVals.push_back(GV);
6389 MDVals.push_back(llvm::MDString::get(Ctx, Enc.str()));
6390 llvm::NamedMDNode *MD =
6391 CGM.getModule().getOrInsertNamedMetadata("xcore.typestrings");
    MD->addOperand(llvm::MDNode::get(Ctx, MDVals));
  }
}
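
// For a C global such as 'int x;' this produces metadata of roughly the
// following shape (editor's sketch; the exact TypeString is defined by the
// format document cited above):
//   !xcore.typestrings = !{!0}
//   !0 = metadata !{i32* @x, metadata !"..."}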
6396 static bool appendType(SmallStringEnc &Enc, QualType QType,
6397 const CodeGen::CodeGenModule &CGM,
6398 TypeStringCache &TSC);
/// Helper function for appendRecordType().
/// Builds a SmallVector containing the encoded field types in declaration
/// order.
static bool extractFieldType(SmallVectorImpl<FieldEncoding> &FE,
                             const RecordDecl *RD,
                             const CodeGen::CodeGenModule &CGM,
                             TypeStringCache &TSC) {
  for (RecordDecl::field_iterator I = RD->field_begin(), E = RD->field_end();
       I != E; ++I) {
    SmallStringEnc Enc;
    Enc += "m(";
    Enc += I->getName();
    Enc += "){";
    if (I->isBitField()) {
      Enc += "b(";
      llvm::raw_svector_ostream OS(Enc);
      OS.resync();
      OS << I->getBitWidthValue(CGM.getContext());
      OS.flush();
      Enc += ':';
    }
    if (!appendType(Enc, I->getType(), CGM, TSC))
      return false;
    if (I->isBitField())
      Enc += ')';
    Enc += '}';
    FE.push_back(FieldEncoding(!I->getName().empty(), Enc));
  }
  return true;
}
6430 /// Appends structure and union types to Enc and adds encoding to cache.
6431 /// Recursively calls appendType (via extractFieldType) for each field.
6432 /// Union types have their fields ordered according to the ABI.
6433 static bool appendRecordType(SmallStringEnc &Enc, const RecordType *RT,
6434 const CodeGen::CodeGenModule &CGM,
6435 TypeStringCache &TSC, const IdentifierInfo *ID) {
6436 // Append the cached TypeString if we have one.
6437 StringRef TypeString = TSC.lookupStr(ID);
  if (!TypeString.empty()) {
    Enc += TypeString;
    return true;
  }

  // Start to emit an incomplete TypeString.
  size_t Start = Enc.size();
  Enc += (RT->isUnionType()? 'u' : 's');
  Enc += '(';
  if (ID)
    Enc += ID->getName();
  Enc += "){";
6451 // We collect all encoded fields and order as necessary.
6452 bool IsRecursive = false;
6453 const RecordDecl *RD = RT->getDecl()->getDefinition();
6454 if (RD && !RD->field_empty()) {
6455 // An incomplete TypeString stub is placed in the cache for this RecordType
6456 // so that recursive calls to this RecordType will use it whilst building a
6457 // complete TypeString for this RecordType.
6458 SmallVector<FieldEncoding, 16> FE;
6459 std::string StubEnc(Enc.substr(Start).str());
6460 StubEnc += '}'; // StubEnc now holds a valid incomplete TypeString.
6461 TSC.addIncomplete(ID, std::move(StubEnc));
    if (!extractFieldType(FE, RD, CGM, TSC)) {
      (void) TSC.removeIncomplete(ID);
      return false;
    }
    IsRecursive = TSC.removeIncomplete(ID);
    // The ABI requires unions to be sorted but not structures.
    // See FieldEncoding::operator< for sort algorithm.
    if (RT->isUnionType())
      std::sort(FE.begin(), FE.end());
    // We can now complete the TypeString.
    unsigned E = FE.size();
    for (unsigned I = 0; I != E; ++I) {
      if (I)
        Enc += ',';
      Enc += FE[I].str();
    }
  }
  Enc += '}';
  TSC.addIfComplete(ID, Enc.substr(Start), IsRecursive);
  return true;
}
6484 /// Appends enum types to Enc and adds the encoding to the cache.
6485 static bool appendEnumType(SmallStringEnc &Enc, const EnumType *ET,
6486 TypeStringCache &TSC,
6487 const IdentifierInfo *ID) {
6488 // Append the cached TypeString if we have one.
6489 StringRef TypeString = TSC.lookupStr(ID);
  if (!TypeString.empty()) {
    Enc += TypeString;
    return true;
  }

  size_t Start = Enc.size();
  Enc += "e(";
  if (ID)
    Enc += ID->getName();
  Enc += "){";
6501 // We collect all encoded enumerations and order them alphanumerically.
6502 if (const EnumDecl *ED = ET->getDecl()->getDefinition()) {
6503 SmallVector<FieldEncoding, 16> FE;
    for (auto I = ED->enumerator_begin(), E = ED->enumerator_end(); I != E;
         ++I) {
      SmallStringEnc EnumEnc;
      EnumEnc += "m(";
      EnumEnc += I->getName();
      EnumEnc += "){";
      I->getInitVal().toString(EnumEnc);
      EnumEnc += '}';
      FE.push_back(FieldEncoding(!I->getName().empty(), EnumEnc));
    }
    std::sort(FE.begin(), FE.end());
    unsigned E = FE.size();
    for (unsigned I = 0; I != E; ++I) {
      if (I)
        Enc += ',';
      Enc += FE[I].str();
    }
  }
  Enc += '}';
  TSC.addIfComplete(ID, Enc.substr(Start), false);
  return true;
}
6527 /// Appends type's qualifier to Enc.
6528 /// This is done prior to appending the type's encoding.
static void appendQualifier(SmallStringEnc &Enc, QualType QT) {
  // Qualifiers are emitted in alphabetical order.
  static const char *Table[] = {"","c:","r:","cr:","v:","cv:","rv:","crv:"};
  int Lookup = 0;
  if (QT.isConstQualified())
    Lookup += 1<<0;
  if (QT.isRestrictQualified())
    Lookup += 1<<1;
  if (QT.isVolatileQualified())
    Lookup += 1<<2;
  Enc += Table[Lookup];
}
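
// E.g. (editor's note): 'const volatile int' sets bits 0 and 2, so
// Lookup == 5 and the "cv:" prefix from Table is emitted before the
// encoding of 'int'.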
6542 /// Appends built-in types to Enc.
6543 static bool appendBuiltinType(SmallStringEnc &Enc, const BuiltinType *BT) {
6544 const char *EncType;
6545 switch (BT->getKind()) {
6546 case BuiltinType::Void:
6549 case BuiltinType::Bool:
6552 case BuiltinType::Char_U:
6555 case BuiltinType::UChar:
6558 case BuiltinType::SChar:
6561 case BuiltinType::UShort:
6564 case BuiltinType::Short:
6567 case BuiltinType::UInt:
6570 case BuiltinType::Int:
6573 case BuiltinType::ULong:
6576 case BuiltinType::Long:
6579 case BuiltinType::ULongLong:
6582 case BuiltinType::LongLong:
6585 case BuiltinType::Float:
6588 case BuiltinType::Double:
6591 case BuiltinType::LongDouble:
6601 /// Appends a pointer encoding to Enc before calling appendType for the pointee.
6602 static bool appendPointerType(SmallStringEnc &Enc, const PointerType *PT,
6603 const CodeGen::CodeGenModule &CGM,
                              TypeStringCache &TSC) {
  Enc += "p(";
  if (!appendType(Enc, PT->getPointeeType(), CGM, TSC))
    return false;
  Enc += ')';
  return true;
}
6612 /// Appends array encoding to Enc before calling appendType for the element.
6613 static bool appendArrayType(SmallStringEnc &Enc, QualType QT,
6614 const ArrayType *AT,
6615 const CodeGen::CodeGenModule &CGM,
6616 TypeStringCache &TSC, StringRef NoSizeEnc) {
  if (AT->getSizeModifier() != ArrayType::Normal)
    return false;
  Enc += "a(";
  if (const ConstantArrayType *CAT = dyn_cast<ConstantArrayType>(AT))
    CAT->getSize().toStringUnsigned(Enc);
  else
    Enc += NoSizeEnc; // Global arrays use "*", otherwise it is "".
  Enc += ':';
  // The Qualifiers should be attached to the type rather than the array.
  appendQualifier(Enc, QT);
  if (!appendType(Enc, AT->getElementType(), CGM, TSC))
    return false;
  Enc += ')';
  return true;
}
6633 /// Appends a function encoding to Enc, calling appendType for the return type
6634 /// and the arguments.
static bool appendFunctionType(SmallStringEnc &Enc, const FunctionType *FT,
                               const CodeGen::CodeGenModule &CGM,
                               TypeStringCache &TSC) {
  Enc += "f{";
  if (!appendType(Enc, FT->getReturnType(), CGM, TSC))
    return false;
  Enc += "}(";
  if (const FunctionProtoType *FPT = FT->getAs<FunctionProtoType>()) {
    // N.B. we are only interested in the adjusted param types.
    auto I = FPT->param_type_begin();
    auto E = FPT->param_type_end();
    if (I != E) {
      do {
        if (!appendType(Enc, *I, CGM, TSC))
          return false;
        ++I;
        if (I != E)
          Enc += ',';
      } while (I != E);
      if (FPT->isVariadic())
        Enc += ",va";
    } else {
      if (FPT->isVariadic())
        Enc += "va";
      else
        Enc += "0";
    }
  }
  Enc += ')';
  return true;
}
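
// E.g. (editor's sketch, using 'si' for the int encoding): 'int f(int, int)'
// encodes as "f{si}(si,si)", and a variadic 'int h(int, ...)' encodes as
// "f{si}(si,va)".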
/// Handles the type's qualifier before dispatching a call to handle specific
/// type encodings.
6669 static bool appendType(SmallStringEnc &Enc, QualType QType,
6670 const CodeGen::CodeGenModule &CGM,
6671 TypeStringCache &TSC) {
6673 QualType QT = QType.getCanonicalType();
6675 if (const ArrayType *AT = QT->getAsArrayTypeUnsafe())
6676 // The Qualifiers should be attached to the type rather than the array.
6677 // Thus we don't call appendQualifier() here.
6678 return appendArrayType(Enc, QT, AT, CGM, TSC, "");
6680 appendQualifier(Enc, QT);
6682 if (const BuiltinType *BT = QT->getAs<BuiltinType>())
6683 return appendBuiltinType(Enc, BT);
6685 if (const PointerType *PT = QT->getAs<PointerType>())
6686 return appendPointerType(Enc, PT, CGM, TSC);
6688 if (const EnumType *ET = QT->getAs<EnumType>())
6689 return appendEnumType(Enc, ET, TSC, QT.getBaseTypeIdentifier());
6691 if (const RecordType *RT = QT->getAsStructureType())
6692 return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());
6694 if (const RecordType *RT = QT->getAsUnionType())
6695 return appendRecordType(Enc, RT, CGM, TSC, QT.getBaseTypeIdentifier());
  if (const FunctionType *FT = QT->getAs<FunctionType>())
    return appendFunctionType(Enc, FT, CGM, TSC);

  return false;
}
6703 static bool getTypeString(SmallStringEnc &Enc, const Decl *D,
                          CodeGen::CodeGenModule &CGM, TypeStringCache &TSC) {
  if (!D)
    return false;

  if (const FunctionDecl *FD = dyn_cast<FunctionDecl>(D)) {
    if (FD->getLanguageLinkage() != CLanguageLinkage)
      return false;
    return appendType(Enc, FD->getType(), CGM, TSC);
  }
  if (const VarDecl *VD = dyn_cast<VarDecl>(D)) {
    if (VD->getLanguageLinkage() != CLanguageLinkage)
      return false;
    QualType QT = VD->getType().getCanonicalType();
    if (const ArrayType *AT = QT->getAsArrayTypeUnsafe()) {
      // Global ArrayTypes are given a size of '*' if the size is unknown.
      // The Qualifiers should be attached to the type rather than the array.
      // Thus we don't call appendQualifier() here.
      return appendArrayType(Enc, QT, AT, CGM, TSC, "*");
    }
    return appendType(Enc, QT, CGM, TSC);
  }

  return false;
}
//===----------------------------------------------------------------------===//
// Driver code
//===----------------------------------------------------------------------===//
6734 const TargetCodeGenInfo &CodeGenModule::getTargetCodeGenInfo() {
6735 if (TheTargetCodeGenInfo)
6736 return *TheTargetCodeGenInfo;
6738 const llvm::Triple &Triple = getTarget().getTriple();
  switch (Triple.getArch()) {
  default:
    return *(TheTargetCodeGenInfo = new DefaultTargetCodeGenInfo(Types));
6743 case llvm::Triple::le32:
6744 return *(TheTargetCodeGenInfo = new PNaClTargetCodeGenInfo(Types));
6745 case llvm::Triple::mips:
6746 case llvm::Triple::mipsel:
6747 return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, true));
6749 case llvm::Triple::mips64:
6750 case llvm::Triple::mips64el:
6751 return *(TheTargetCodeGenInfo = new MIPSTargetCodeGenInfo(Types, false));
6753 case llvm::Triple::aarch64:
6754 case llvm::Triple::aarch64_be:
6755 case llvm::Triple::arm64:
6756 case llvm::Triple::arm64_be: {
6757 AArch64ABIInfo::ABIKind Kind = AArch64ABIInfo::AAPCS;
6758 if (getTarget().getABI() == "darwinpcs")
6759 Kind = AArch64ABIInfo::DarwinPCS;
    return *(TheTargetCodeGenInfo = new AArch64TargetCodeGenInfo(Types, Kind));
  }
6764 case llvm::Triple::arm:
6765 case llvm::Triple::armeb:
6766 case llvm::Triple::thumb:
  case llvm::Triple::thumbeb:
  {
    ARMABIInfo::ABIKind Kind = ARMABIInfo::AAPCS;
6770 if (getTarget().getABI() == "apcs-gnu")
6771 Kind = ARMABIInfo::APCS;
6772 else if (CodeGenOpts.FloatABI == "hard" ||
6773 (CodeGenOpts.FloatABI != "soft" &&
6774 Triple.getEnvironment() == llvm::Triple::GNUEABIHF))
6775 Kind = ARMABIInfo::AAPCS_VFP;
    switch (Triple.getOS()) {
    case llvm::Triple::NaCl:
      return *(TheTargetCodeGenInfo =
               new NaClARMTargetCodeGenInfo(Types, Kind));
    default:
      return *(TheTargetCodeGenInfo =
               new ARMTargetCodeGenInfo(Types, Kind));
    }
  }
6787 case llvm::Triple::ppc:
6788 return *(TheTargetCodeGenInfo = new PPC32TargetCodeGenInfo(Types));
6789 case llvm::Triple::ppc64:
6790 if (Triple.isOSBinFormatELF()) {
6791 // FIXME: Should be switchable via command-line option.
6792 PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv1;
6793 return *(TheTargetCodeGenInfo =
6794 new PPC64_SVR4_TargetCodeGenInfo(Types, Kind));
    } else
      return *(TheTargetCodeGenInfo = new PPC64TargetCodeGenInfo(Types));
6797 case llvm::Triple::ppc64le: {
6798 assert(Triple.isOSBinFormatELF() && "PPC64 LE non-ELF not supported!");
6799 // FIXME: Should be switchable via command-line option.
6800 PPC64_SVR4_ABIInfo::ABIKind Kind = PPC64_SVR4_ABIInfo::ELFv2;
6801 return *(TheTargetCodeGenInfo =
             new PPC64_SVR4_TargetCodeGenInfo(Types, Kind));
  }
6805 case llvm::Triple::nvptx:
6806 case llvm::Triple::nvptx64:
6807 return *(TheTargetCodeGenInfo = new NVPTXTargetCodeGenInfo(Types));
6809 case llvm::Triple::msp430:
6810 return *(TheTargetCodeGenInfo = new MSP430TargetCodeGenInfo(Types));
6812 case llvm::Triple::systemz:
6813 return *(TheTargetCodeGenInfo = new SystemZTargetCodeGenInfo(Types));
6815 case llvm::Triple::tce:
6816 return *(TheTargetCodeGenInfo = new TCETargetCodeGenInfo(Types));
6818 case llvm::Triple::x86: {
6819 bool IsDarwinVectorABI = Triple.isOSDarwin();
6820 bool IsSmallStructInRegABI =
6821 X86_32TargetCodeGenInfo::isStructReturnInRegABI(Triple, CodeGenOpts);
6822 bool IsWin32FloatStructABI = Triple.isWindowsMSVCEnvironment();
6824 if (Triple.getOS() == llvm::Triple::Win32) {
6825 return *(TheTargetCodeGenInfo =
6826 new WinX86_32TargetCodeGenInfo(Types,
6827 IsDarwinVectorABI, IsSmallStructInRegABI,
6828 IsWin32FloatStructABI,
6829 CodeGenOpts.NumRegisterParameters));
    } else {
      return *(TheTargetCodeGenInfo =
6832 new X86_32TargetCodeGenInfo(Types,
6833 IsDarwinVectorABI, IsSmallStructInRegABI,
6834 IsWin32FloatStructABI,
                                           CodeGenOpts.NumRegisterParameters));
    }
  }
6839 case llvm::Triple::x86_64: {
6840 bool HasAVX = getTarget().getABI() == "avx";
6842 switch (Triple.getOS()) {
6843 case llvm::Triple::Win32:
6844 return *(TheTargetCodeGenInfo = new WinX86_64TargetCodeGenInfo(Types));
    case llvm::Triple::NaCl:
      return *(TheTargetCodeGenInfo = new NaClX86_64TargetCodeGenInfo(Types,
                                                                      HasAVX));
    default:
      return *(TheTargetCodeGenInfo = new X86_64TargetCodeGenInfo(Types,
                                                                  HasAVX));
    }
  }
6853 case llvm::Triple::hexagon:
6854 return *(TheTargetCodeGenInfo = new HexagonTargetCodeGenInfo(Types));
6855 case llvm::Triple::sparcv9:
6856 return *(TheTargetCodeGenInfo = new SparcV9TargetCodeGenInfo(Types));
  case llvm::Triple::xcore:
    return *(TheTargetCodeGenInfo = new XCoreTargetCodeGenInfo(Types));
  }
}