//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "ABIInfo.h"
#include "CGCXXABI.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;

/***/

static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_X86_64Win64: return llvm::CallingConv::X86_64_Win64;
  case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
  // TODO: Add support for __pascal to LLVM.
  case CC_X86Pascal: return llvm::CallingConv::C;
  case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
  case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
  case CC_SpirKernel: return llvm::CallingConv::SPIR_KERNEL;
  }
}
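
// For example, a function declared 'void f(void) __attribute__((stdcall))'
// on x86 is emitted with llvm::CallingConv::X86_StdCall, while conventions
// with no LLVM counterpart (such as __pascal above) fall back to the C
// convention.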

/// Derives the 'this' type for codegen purposes, i.e. ignoring method
/// qualification.
/// FIXME: address space qualification?
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type. Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}
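
// For example, a function declared to return 'const int' is lowered exactly
// as if it returned plain 'int'.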

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
  return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
                                 /*instanceMethod=*/false,
                                 /*chainCall=*/false, None,
                                 FTNP->getExtInfo(), RequiredArgs(0));
}

/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored.
static const CGFunctionInfo &
arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
                        SmallVectorImpl<CanQualType> &prefix,
                        CanQual<FunctionProtoType> FTP) {
  RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
  // FIXME: Kill copy.
  prefix.append(FTP->param_type_begin(), FTP->param_type_end());
  CanQualType resultType = FTP->getReturnType().getUnqualifiedType();
  return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
                                     /*chainCall=*/false, prefix,
                                     FTP->getExtInfo(), required);
}

/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
                                   FTP);
}

static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<VectorCallAttr>())
    return CC_X86VectorCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  if (D->hasAttr<IntelOclBiccAttr>())
    return CC_IntelOclBicc;

  if (D->hasAttr<MSABIAttr>())
    return IsWindows ? CC_C : CC_X86_64Win64;

  if (D->hasAttr<SysVABIAttr>())
    return IsWindows ? CC_X86_64SysV : CC_C;

  return CC_C;
}
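
// For example, '__attribute__((ms_abi))' on a non-Windows x86-64 target maps
// to CC_X86_64Win64, while on Windows (where it is already the default) it
// maps to plain CC_C.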

/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// (A null RD means we don't have any meaningful "this" argument type,
///  so fall back to a generic pointer type).
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  if (RD)
    argTypes.push_back(GetThisType(Context, RD));
  else
    argTypes.push_back(Context.VoidPtrTy);

  return ::arrangeLLVMFunctionInfo(
      *this, true, argTypes,
      FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function. The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQual<FunctionProtoType> prototype = GetFormalType(MD);

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
    return arrangeCXXMethodType(ThisType, prototype.getTypePtr());
  }

  return arrangeFreeFunctionType(prototype);
}

const CGFunctionInfo &
CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
                                            StructorType Type) {
  SmallVector<CanQualType, 16> argTypes;
  argTypes.push_back(GetThisType(Context, MD->getParent()));

  GlobalDecl GD;
  if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
    GD = GlobalDecl(CD, toCXXCtorType(Type));
  } else {
    auto *DD = dyn_cast<CXXDestructorDecl>(MD);
    GD = GlobalDecl(DD, toCXXDtorType(Type));
  }

  CanQual<FunctionProtoType> FTP = GetFormalType(MD);

  // Add the formal parameters.
  argTypes.append(FTP->param_type_begin(), FTP->param_type_end());

  TheCXXABI.buildStructorSignature(MD, Type, argTypes);

  RequiredArgs required =
      (MD->isVariadic() ? RequiredArgs(argTypes.size()) : RequiredArgs::All);

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  CanQualType resultType = TheCXXABI.HasThisReturn(GD)
                               ? argTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;
  return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, argTypes, extInfo,
                                 required);
}

/// Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
                                        const CXXConstructorDecl *D,
                                        CXXCtorType CtorKind,
                                        unsigned ExtraArgs) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> ArgTypes;
  for (const auto &Arg : args)
    ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));

  CanQual<FunctionProtoType> FPT = GetFormalType(D);
  RequiredArgs Required = RequiredArgs::forPrototypePlus(FPT, 1 + ExtraArgs);
  GlobalDecl GD(D, CtorKind);
  CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
                               ? ArgTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;

  FunctionType::ExtInfo Info = FPT->getExtInfo();
  return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTypes, Info,
                                 Required);
}

/// Arrange the argument and result information for the declaration or
/// definition of the given function.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));

  // When declaring a function without a prototype, always use a
  // non-variadic type.
  if (isa<FunctionNoProtoType>(FTy)) {
    CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
    return arrangeLLVMFunctionInfo(
        noProto->getReturnType(), /*instanceMethod=*/false,
        /*chainCall=*/false, None, noProto->getExtInfo(), RequiredArgs::All);
  }

  assert(isa<FunctionProtoType>(FTy));
  return arrangeFreeFunctionType(FTy.getAs<FunctionProtoType>());
}

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}

/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type. The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (const auto *I : MD->params()) {
    argTys.push_back(Context.getCanonicalParamType(I->getType()));
  }

  FunctionType::ExtInfo einfo;
  bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
      (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeLLVMFunctionInfo(
      GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
      /*chainCall=*/false, argTys, einfo, required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return arrangeCXXStructorDeclaration(CD, getFromCtorType(GD.getCtorType()));

  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return arrangeCXXStructorDeclaration(DD, getFromDtorType(GD.getDtorType()));

  return arrangeFunctionDeclaration(FD);
}

/// Arrange a thunk that takes 'this' as the first parameter followed by
/// varargs. Return a void pointer, regardless of the actual return type.
/// The body of the thunk will end in a musttail call to a function of the
/// correct type, and the caller will bitcast the function to the correct
/// type.
const CGFunctionInfo &
CodeGenTypes::arrangeMSMemberPointerThunk(const CXXMethodDecl *MD) {
  assert(MD->isVirtual() && "only virtual memptrs have thunks");
  CanQual<FunctionProtoType> FTP = GetFormalType(MD);
  CanQualType ArgTys[] = { GetThisType(Context, MD->getParent()) };
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
                                 /*chainCall=*/false, ArgTys,
                                 FTP->getExtInfo(), RequiredArgs(1));
}

const CGFunctionInfo &
CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
                                   CXXCtorType CT) {
  assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);

  CanQual<FunctionProtoType> FTP = GetFormalType(CD);
  SmallVector<CanQualType, 2> ArgTys;
  const CXXRecordDecl *RD = CD->getParent();
  ArgTys.push_back(GetThisType(Context, RD));
  if (CT == Ctor_CopyingClosure)
    ArgTys.push_back(*FTP->param_type_begin());
  if (RD->getNumVBases() > 0)
    ArgTys.push_back(Context.IntTy);
  CallingConv CC = Context.getDefaultCallingConvention(
      /*IsVariadic=*/false, /*IsCXXMethod=*/true);
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTys,
                                 FunctionType::ExtInfo(CC), RequiredArgs::All);
}

/// Arrange a call as unto a free function, except possibly with an
/// additional number of formal parameters considered required.
static const CGFunctionInfo &
arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
                            CodeGenModule &CGM,
                            const CallArgList &args,
                            const FunctionType *fnType,
                            unsigned numExtraRequiredArgs,
                            bool chainCall) {
  assert(args.size() >= numExtraRequiredArgs);

  // In most cases, there are no optional arguments.
  RequiredArgs required = RequiredArgs::All;

  // If we have a variadic prototype, the required arguments are the
  // extra prefix plus the arguments in the prototype.
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs(proto->getNumParams() + numExtraRequiredArgs);

  // If we don't have a prototype at all, but we're supposed to
  // explicitly use the variadic convention for unprototyped calls,
  // treat all of the arguments as required but preserve the nominal
  // possibility of variadics.
  } else if (CGM.getTargetCodeGenInfo()
                 .isNoProtoCallVariadic(args,
                                        cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(args.size());
  }

  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &arg : args)
    argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
  return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
                                     /*instanceMethod=*/false, chainCall,
                                     argTypes, fnType->getExtInfo(), required);
}

/// Figure out the rules for calling a function with the given formal
/// type using the given arguments. The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      const FunctionType *fnType,
                                      bool chainCall) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
                                     chainCall ? 1 : 0, chainCall);
}

/// A block function call is essentially a free-function call with an
/// extra implicit argument.
const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
                                       const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
                                     /*chainCall=*/false);
}

const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(QualType resultType,
                                      const CallArgList &args,
                                      FunctionType::ExtInfo info,
                                      RequiredArgs required) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, info, required);
}

/// Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
                                   const FunctionProtoType *FPT,
                                   RequiredArgs required) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));

  FunctionType::ExtInfo info = FPT->getExtInfo();
  return arrangeLLVMFunctionInfo(
      GetReturnType(FPT->getReturnType()), /*instanceMethod=*/true,
      /*chainCall=*/false, argTypes, info, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeFreeFunctionDeclaration(
    QualType resultType, const FunctionArgList &args,
    const FunctionType::ExtInfo &info, bool isVariadic) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (auto Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg->getType()));

  RequiredArgs required =
      (isVariadic ? RequiredArgs(args.size()) : RequiredArgs::All);
  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, info, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeLLVMFunctionInfo(
      getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
      None, FunctionType::ExtInfo(), RequiredArgs::All);
}

/// Arrange the argument and result information for an abstract value
/// of a given function type. This is the method which all of the
/// above functions ultimately defer to.
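///
/// The returned CGFunctionInfo is uniqued in the FunctionInfos folding set,
/// so repeated queries for the same signature return the same node and the
/// ABI computation below runs only once per signature.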
const CGFunctionInfo &
CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
                                      bool instanceMethod,
                                      bool chainCall,
                                      ArrayRef<CanQualType> argTypes,
                                      FunctionType::ExtInfo info,
                                      RequiredArgs required) {
  assert(std::all_of(argTypes.begin(), argTypes.end(),
                     std::mem_fun_ref(&CanQualType::isCanonicalAsParam)));

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, required,
                          resultType, argTypes);

  void *insertPos = nullptr;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  // Construct the function info. We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
                              resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI).second;
  (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  getABIInfo().computeInfo(*FI);

  // Loop over all of the computed argument and return value info. If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (auto &I : FI->arguments())
    if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
      I.info.setCoerceToType(ConvertType(I.type));

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}

CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
                                       bool instanceMethod,
                                       bool chainCall,
                                       const FunctionType::ExtInfo &info,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  void *buffer = operator new(sizeof(CGFunctionInfo) +
                              sizeof(ArgInfo) * (argTypes.size() + 1));
  CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->InstanceMethod = instanceMethod;
  FI->ChainCall = chainCall;
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->ArgStruct = nullptr;
  FI->NumArgs = argTypes.size();
  FI->getArgsBuffer()[0].type = resultType;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  return FI;
}

/***/

namespace {

// ABIArgInfo::Expand implementation.

// Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
struct TypeExpansion {
  enum TypeExpansionKind {
    // Elements of constant arrays are expanded recursively.
    TEK_ConstantArray,
    // Record fields are expanded recursively (but if record is a union, only
    // the field with the largest size is expanded).
    TEK_Record,
    // For complex types, real and imaginary parts are expanded recursively.
    TEK_Complex,
    // All other types are not expandable.
    TEK_None
  };

  const TypeExpansionKind Kind;

  TypeExpansion(TypeExpansionKind K) : Kind(K) {}
  virtual ~TypeExpansion() {}
};

struct ConstantArrayExpansion : TypeExpansion {
  QualType EltTy;
  uint64_t NumElts;

  ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
      : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_ConstantArray;
  }
};

struct RecordExpansion : TypeExpansion {
  SmallVector<const CXXBaseSpecifier *, 1> Bases;

  SmallVector<const FieldDecl *, 1> Fields;

  RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
                  SmallVector<const FieldDecl *, 1> &&Fields)
      : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
        Fields(std::move(Fields)) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Record;
  }
};

struct ComplexExpansion : TypeExpansion {
  QualType EltTy;

  ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Complex;
  }
};

struct NoExpansion : TypeExpansion {
  NoExpansion() : TypeExpansion(TEK_None) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_None;
  }
};
}  // namespace

static std::unique_ptr<TypeExpansion>
getTypeExpansion(QualType Ty, const ASTContext &Context) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    return llvm::make_unique<ConstantArrayExpansion>(
        AT->getElementType(), AT->getSize().getZExtValue());
  }
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    SmallVector<const CXXBaseSpecifier *, 1> Bases;
    SmallVector<const FieldDecl *, 1> Fields;
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can be here only in degenerative cases - all the fields are the
      // same after flattening. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = nullptr;
      CharUnits UnionSize = CharUnits::Zero();

      for (const auto *FD : RD->fields()) {
        // Skip zero-length bitfields.
        if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        Fields.push_back(LargestFD);
    } else {
      if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        assert(!CXXRD->isDynamicClass() &&
               "cannot expand vtable pointers in dynamic classes");
        for (const CXXBaseSpecifier &BS : CXXRD->bases())
          Bases.push_back(&BS);
      }

      for (const auto *FD : RD->fields()) {
        // Skip zero-length bitfields.
        if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        Fields.push_back(FD);
      }
    }
    return llvm::make_unique<RecordExpansion>(std::move(Bases),
                                              std::move(Fields));
  }
  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    return llvm::make_unique<ComplexExpansion>(CT->getElementType());
  }
  return llvm::make_unique<NoExpansion>();
}
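
// Returns the number of IR arguments that expanding Ty produces: the product
// over nested constant arrays, the sum over record bases and fields, 2 for a
// _Complex type, and 1 for anything else.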
static int getExpansionSize(QualType Ty, const ASTContext &Context) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
  }
  if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    int Res = 0;
    for (auto BS : RExp->Bases)
      Res += getExpansionSize(BS->getType(), Context);
    for (auto FD : RExp->Fields)
      Res += getExpansionSize(FD->getType(), Context);
    return Res;
  }
  if (isa<ComplexExpansion>(Exp.get()))
    return 2;
  assert(isa<NoExpansion>(Exp.get()));
  return 1;
}

void
CodeGenTypes::getExpandedTypes(QualType Ty,
                               SmallVectorImpl<llvm::Type *>::iterator &TI) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {
      getExpandedTypes(CAExp->EltTy, TI);
    }
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    for (auto BS : RExp->Bases)
      getExpandedTypes(BS->getType(), TI);
    for (auto FD : RExp->Fields)
      getExpandedTypes(FD->getType(), TI);
  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
    llvm::Type *EltTy = ConvertType(CExp->EltTy);
    *TI++ = EltTy;
    *TI++ = EltTy;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    *TI++ = ConvertType(Ty);
  }
}

void CodeGenFunction::ExpandTypeFromArgs(
    QualType Ty, LValue LV, SmallVectorImpl<llvm::Argument *>::iterator &AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {
      llvm::Value *EltAddr =
          Builder.CreateConstGEP2_32(nullptr, LV.getAddress(), 0, i);
      LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
      ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
    }
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    llvm::Value *This = LV.getAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      llvm::Value *Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      LValue SubLV = MakeAddrLValue(Base, BS->getType());

      // Recurse onto bases.
      ExpandTypeFromArgs(BS->getType(), SubLV, AI);
    }
    for (auto FD : RExp->Fields) {
      // FIXME: What are the right qualifiers here?
      LValue SubLV = EmitLValueForField(LV, FD);
      ExpandTypeFromArgs(FD->getType(), SubLV, AI);
    }
  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
    llvm::Value *RealAddr =
        Builder.CreateStructGEP(nullptr, LV.getAddress(), 0, "real");
    EmitStoreThroughLValue(RValue::get(*AI++),
                           MakeAddrLValue(RealAddr, CExp->EltTy));
    llvm::Value *ImagAddr =
        Builder.CreateStructGEP(nullptr, LV.getAddress(), 1, "imag");
    EmitStoreThroughLValue(RValue::get(*AI++),
                           MakeAddrLValue(ImagAddr, CExp->EltTy));
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    EmitStoreThroughLValue(RValue::get(*AI++), LV);
  }
}

void CodeGenFunction::ExpandTypeToArgs(
    QualType Ty, RValue RV, llvm::FunctionType *IRFuncTy,
    SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    llvm::Value *Addr = RV.getAggregateAddr();
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {
      llvm::Value *EltAddr = Builder.CreateConstGEP2_32(nullptr, Addr, 0, i);
      RValue EltRV =
          convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation());
      ExpandTypeToArgs(CAExp->EltTy, EltRV, IRFuncTy, IRCallArgs, IRCallArgPos);
    }
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    llvm::Value *This = RV.getAggregateAddr();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      llvm::Value *Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      RValue BaseRV = RValue::getAggregate(Base);

      // Recurse onto bases.
      ExpandTypeToArgs(BS->getType(), BaseRV, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }

    LValue LV = MakeAddrLValue(This, Ty);
    for (auto FD : RExp->Fields) {
      RValue FldRV = EmitRValueForField(LV, FD, SourceLocation());
      ExpandTypeToArgs(FD->getType(), FldRV, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    ComplexPairTy CV = RV.getComplexVal();
    IRCallArgs[IRCallArgPos++] = CV.first;
    IRCallArgs[IRCallArgPos++] = CV.second;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
    llvm::Value *V = RV.getScalarVal();
    if (IRCallArgPos < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(IRCallArgPos))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));

    IRCallArgs[IRCallArgPos++] = V;
  }
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer from which we
/// are going to access some number of bytes, try to GEP into the struct to
/// get at its inner goodness. Dive as deep as possible without entering an
/// element with an in-memory size smaller than DstSize.
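///
/// For example, given '{ i32, i32 }' and DstSize == 4, this dives to the
/// first i32; given '{ { i8 }, i32 }' and DstSize == 4, it stops at the outer
/// struct because the first element is too small to cover the access.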
static llvm::Value *
EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it. The
  // comparison must be made on the store size and not the alloca size. Using
  // the alloca size may overstate the size of the load.
  uint64_t FirstEltSize =
      CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcSTy, SrcPtr, 0, 0, "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy =
      cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers. This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
///
/// This behaves as if the value were coerced through memory, so on big-endian
/// targets the high bits are preserved in a truncation, while little-endian
/// targets preserve the low bits.
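///
/// For example, truncating the i64 0xAABBCCDD11223344 to i32 yields
/// 0x11223344 on a little-endian target but 0xAABBCCDD on a big-endian
/// target, matching what a store followed by a narrower load would produce.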
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy) {
    const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
    if (DL.isBigEndian()) {
      // Preserve the high bits on big-endian targets.
      // That is what memory coercion does.
      uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);

      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
      } else {
        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
      }
    } else {
      // Little-endian targets preserve the low bits. No shifts required.
      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
    }
  }

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}

/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty, known to be aligned to
/// \arg SrcAlign bytes.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
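///
/// For example, loading a '[2 x i64]' coerce type from a 12-byte source
/// struct copies the 12 bytes into a 16-byte temporary via memcpy and loads
/// from that, since a direct 16-byte load would read past the source object.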
static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
                                      llvm::Type *Ty, CharUnits SrcAlign,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy =
      cast<llvm::PointerType>(SrcPtr->getType())->getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateAlignedLoad(SrcPtr, SrcAlign.getQuantity());

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
    SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::LoadInst *Load =
        CGF.Builder.CreateAlignedLoad(SrcPtr, SrcAlign.getQuantity());
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Casted =
        CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
    return CGF.Builder.CreateAlignedLoad(Casted, SrcAlign.getQuantity());
  }

  // Otherwise do coercion through memory. This is stupid, but
  // simple.
  llvm::AllocaInst *Tmp = CGF.CreateTempAlloca(Ty);
  Tmp->setAlignment(SrcAlign.getQuantity());
  llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy();
  llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy);
  llvm::Value *SrcCasted = CGF.Builder.CreateBitCast(SrcPtr, I8PtrTy);
  CGF.Builder.CreateMemCpy(Casted, SrcCasted,
                           llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
                           SrcAlign.getQuantity(), false);
  return CGF.Builder.CreateAlignedLoad(Tmp, SrcAlign.getQuantity());
}

// Function to store a first-class aggregate into memory. We prefer to
// store the elements rather than the aggregate to be more friendly to
// the optimizer.
//
// FIXME: Do we need to recurse here?
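//
// For example, a '{ i32, double }' value is stored as an i32 store at offset
// 0 and a double store at offset 8 rather than as a single FCA store.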
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
                          llvm::Value *DestPtr, bool DestIsVolatile,
                          CharUnits DestAlign) {
  // Prefer scalar stores to first-class aggregate stores.
  if (llvm::StructType *STy =
          dyn_cast<llvm::StructType>(Val->getType())) {
    const llvm::StructLayout *Layout =
        CGF.CGM.getDataLayout().getStructLayout(STy);

    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      llvm::Value *EltPtr = CGF.Builder.CreateConstGEP2_32(STy, DestPtr, 0, i);
      llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
      uint64_t EltOffset = Layout->getElementOffset(i);
      CharUnits EltAlign =
          DestAlign.alignmentAtOffset(CharUnits::fromQuantity(EltOffset));
      CGF.Builder.CreateAlignedStore(Elt, EltPtr, EltAlign.getQuantity(),
                                     DestIsVolatile);
    }
  } else {
    CGF.Builder.CreateAlignedStore(Val, DestPtr, DestAlign.getQuantity(),
                                   DestIsVolatile);
  }
}

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types. The
/// destination is known to be aligned to \arg DstAlign bytes.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
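///
/// For example, storing an i64 coerced value into a '{ float }' destination
/// spills the i64 to a temporary alloca and memcpys only the first 4 bytes
/// into the destination; the remaining bytes are dropped.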
static void CreateCoercedStore(llvm::Value *Src,
                               llvm::Value *DstPtr,
                               bool DstIsVolatile,
                               CharUnits DstAlign,
                               CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy =
      cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateAlignedStore(Src, DstPtr, DstAlign.getQuantity(),
                                   DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
    DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateAlignedStore(Src, DstPtr, DstAlign.getQuantity(),
                                   DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    llvm::Value *Casted =
        CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
    BuildAggStore(CGF, Src, Casted, DstIsVolatile, DstAlign);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::AllocaInst *Tmp = CGF.CreateTempAlloca(SrcTy);
    Tmp->setAlignment(DstAlign.getQuantity());
    CGF.Builder.CreateAlignedStore(Src, Tmp, DstAlign.getQuantity());
    llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy();
    llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy);
    llvm::Value *DstCasted = CGF.Builder.CreateBitCast(DstPtr, I8PtrTy);
    CGF.Builder.CreateMemCpy(DstCasted, Casted,
                             llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
                             DstAlign.getQuantity(), false);
  }
}

namespace {

/// Encapsulates information about the way function arguments from
/// CGFunctionInfo should be passed to actual LLVM IR function.
class ClangToLLVMArgMapping {
  static const unsigned InvalidIndex = ~0U;
  unsigned InallocaArgNo;
  unsigned SRetArgNo;
  unsigned TotalIRArgs;

  /// Arguments of LLVM IR function corresponding to single Clang argument.
  struct IRArgs {
    unsigned PaddingArgIndex;
    // Argument is expanded to IR arguments at positions
    // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
    unsigned FirstArgIndex;
    unsigned NumberOfArgs;

    IRArgs()
        : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
          NumberOfArgs(0) {}
  };

  SmallVector<IRArgs, 8> ArgInfo;

public:
  ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
                        bool OnlyRequiredArgs = false)
      : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
        ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
    construct(Context, FI, OnlyRequiredArgs);
  }

  bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
  unsigned getInallocaArgNo() const {
    assert(hasInallocaArg());
    return InallocaArgNo;
  }

  bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
  unsigned getSRetArgNo() const {
    assert(hasSRetArg());
    return SRetArgNo;
  }

  unsigned totalIRArgs() const { return TotalIRArgs; }

  bool hasPaddingArg(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
  }
  unsigned getPaddingArgNo(unsigned ArgNo) const {
    assert(hasPaddingArg(ArgNo));
    return ArgInfo[ArgNo].PaddingArgIndex;
  }

  /// Returns index of first IR argument corresponding to ArgNo, and their
  /// quantity.
  std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
                          ArgInfo[ArgNo].NumberOfArgs);
  }

private:
  void construct(const ASTContext &Context, const CGFunctionInfo &FI,
                 bool OnlyRequiredArgs);
};
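
// For example, for 'void f(int, _Complex float)' where the complex argument
// is expanded, getIRArgs(1) returns {1, 2}: the complex value occupies two
// consecutive IR arguments starting at index 1.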

void ClangToLLVMArgMapping::construct(const ASTContext &Context,
                                      const CGFunctionInfo &FI,
                                      bool OnlyRequiredArgs) {
  unsigned IRArgNo = 0;
  bool SwapThisWithSRet = false;
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  if (RetAI.getKind() == ABIArgInfo::Indirect) {
    SwapThisWithSRet = RetAI.isSRetAfterThis();
    SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
  }

  unsigned ArgNo = 0;
  unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
       ++I, ++ArgNo) {
    assert(I != FI.arg_end());
    QualType ArgType = I->type;
    const ABIArgInfo &AI = I->info;
    // Collect data about IR arguments corresponding to Clang argument ArgNo.
    auto &IRArgs = ArgInfo[ArgNo];

    if (AI.getPaddingType())
      IRArgs.PaddingArgIndex = IRArgNo++;

    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // FIXME: handle sseregparm someday...
      llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
      if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
        IRArgs.NumberOfArgs = STy->getNumElements();
      } else {
        IRArgs.NumberOfArgs = 1;
      }
      break;
    }
    case ABIArgInfo::Indirect:
      IRArgs.NumberOfArgs = 1;
      break;
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      // ignore and inalloca don't have matching LLVM parameters.
      IRArgs.NumberOfArgs = 0;
      break;
    case ABIArgInfo::Expand: {
      IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
      break;
    }
    }

    if (IRArgs.NumberOfArgs > 0) {
      IRArgs.FirstArgIndex = IRArgNo;
      IRArgNo += IRArgs.NumberOfArgs;
    }

    // Skip over the sret parameter when it comes second. We already handled it
    // above.
    if (IRArgNo == 1 && SwapThisWithSRet)
      IRArgNo++;
  }
  assert(ArgNo == ArgInfo.size());

  if (FI.usesInAlloca())
    InallocaArgNo = IRArgNo++;

  TotalIRArgs = IRArgNo;
}
}  // namespace

/***/

bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
  return ReturnTypeUsesSRet(FI) &&
         getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
}

bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    default:
      return false;
    case BuiltinType::Float:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
    case BuiltinType::Double:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
    case BuiltinType::LongDouble:
      return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
    }
  }

  return false;
}
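
// For example, 32-bit x86 reports true here for floating-point results so
// that Objective-C message sends go through objc_msgSend_fpret, which copes
// with the x87 floating-point stack being in an undefined state when the
// receiver is nil.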

bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
  if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
    if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
      if (BT->getKind() == BuiltinType::LongDouble)
        return getTarget().useObjCFP2RetForComplexLongDouble();
    }
  }

  return false;
}

llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
  return GetFunctionType(FI);
}

llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {

  bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
  (void)Inserted;
  assert(Inserted && "Recursively being processed?");

  llvm::Type *resultType = nullptr;
  const ABIArgInfo &retAI = FI.getReturnInfo();
  switch (retAI.getKind()) {
  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    resultType = retAI.getCoerceToType();
    break;

  case ABIArgInfo::InAlloca:
    if (retAI.getInAllocaSRet()) {
      // sret things on win32 aren't void, they return the sret pointer.
      QualType ret = FI.getReturnType();
      llvm::Type *ty = ConvertType(ret);
      unsigned addressSpace = Context.getTargetAddressSpace(ret);
      resultType = llvm::PointerType::get(ty, addressSpace);
    } else {
      resultType = llvm::Type::getVoidTy(getLLVMContext());
    }
    break;

  case ABIArgInfo::Indirect: {
    assert(!retAI.getIndirectAlign() && "Align unused on indirect return.");
    resultType = llvm::Type::getVoidTy(getLLVMContext());
    break;
  }

  case ABIArgInfo::Ignore:
    resultType = llvm::Type::getVoidTy(getLLVMContext());
    break;
  }

  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
  SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());

  // Add type for sret argument.
  if (IRFunctionArgs.hasSRetArg()) {
    QualType Ret = FI.getReturnType();
    llvm::Type *Ty = ConvertType(Ret);
    unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
    ArgTypes[IRFunctionArgs.getSRetArgNo()] =
        llvm::PointerType::get(Ty, AddressSpace);
  }

  // Add type for inalloca argument.
  if (IRFunctionArgs.hasInallocaArg()) {
    auto ArgStruct = FI.getArgStruct();
    assert(ArgStruct);
    ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
  }

  // Add in all of the required arguments.
  unsigned ArgNo = 0;
  CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
                                     ie = it + FI.getNumRequiredArgs();
  for (; it != ie; ++it, ++ArgNo) {
    const ABIArgInfo &ArgInfo = it->info;

    // Insert a padding type to ensure proper alignment.
    if (IRFunctionArgs.hasPaddingArg(ArgNo))
      ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
          ArgInfo.getPaddingType();

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      assert(NumIRArgs == 0);
      break;

    case ABIArgInfo::Indirect: {
      assert(NumIRArgs == 1);
      // Indirect arguments are always on the stack, which is addr space #0.
      llvm::Type *LTy = ConvertTypeForMem(it->type);
      ArgTypes[FirstIRArg] = LTy->getPointerTo();
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // Fast-isel and the optimizer generally like scalar values better than
      // FCAs, so we flatten them if this is safe to do for this argument.
      llvm::Type *argType = ArgInfo.getCoerceToType();
      llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
      if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
        assert(NumIRArgs == st->getNumElements());
        for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
          ArgTypes[FirstIRArg + i] = st->getElementType(i);
      } else {
        assert(NumIRArgs == 1);
        ArgTypes[FirstIRArg] = argType;
      }
      break;
    }

    case ABIArgInfo::Expand:
      auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
      getExpandedTypes(it->type, ArgTypesIter);
      assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
      break;
    }
  }

  bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
  assert(Erased && "Not in set?");

  return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
}

llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  if (!isFuncTypeConvertible(FPT))
    return llvm::StructType::get(getLLVMContext());

  const CGFunctionInfo *Info;
  if (isa<CXXDestructorDecl>(MD))
    Info =
        &arrangeCXXStructorDeclaration(MD, getFromDtorType(GD.getDtorType()));
  else
    Info = &arrangeCXXMethodDeclaration(MD);
  return GetFunctionType(*Info);
}

void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
                                           const Decl *TargetDecl,
                                           AttributeListType &PAL,
                                           unsigned &CallingConv,
                                           bool AttrOnCallSite) {
  llvm::AttrBuilder FuncAttrs;
  llvm::AttrBuilder RetAttrs;
  bool HasOptnone = false;

  CallingConv = FI.getEffectiveCallingConvention();

  if (FI.isNoReturn())
    FuncAttrs.addAttribute(llvm::Attribute::NoReturn);

  // FIXME: handle sseregparm someday...
  if (TargetDecl) {
    if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
    if (TargetDecl->hasAttr<NoDuplicateAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);

    if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
      const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>();
      if (FPT && FPT->isNothrow(getContext()))
        FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
      // Don't use [[noreturn]] or _Noreturn for a call to a virtual function.
      // These attributes are not inherited by overloads.
      const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
      if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual()))
        FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
    }

    // 'const' and 'pure' attribute functions are also nounwind.
    if (TargetDecl->hasAttr<ConstAttr>()) {
      FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    } else if (TargetDecl->hasAttr<PureAttr>()) {
      FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    }
    if (TargetDecl->hasAttr<RestrictAttr>())
      RetAttrs.addAttribute(llvm::Attribute::NoAlias);
    if (TargetDecl->hasAttr<ReturnsNonNullAttr>())
      RetAttrs.addAttribute(llvm::Attribute::NonNull);

    HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
  }

  // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
  if (!HasOptnone) {
    if (CodeGenOpts.OptimizeSize)
      FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
    if (CodeGenOpts.OptimizeSize == 2)
      FuncAttrs.addAttribute(llvm::Attribute::MinSize);
  }

  if (CodeGenOpts.DisableRedZone)
    FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
  if (CodeGenOpts.NoImplicitFloat)
    FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
  if (CodeGenOpts.EnableSegmentedStacks &&
      !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>()))
    FuncAttrs.addAttribute("split-stack");

  if (AttrOnCallSite) {
    // Attributes that should go on the call site only.
    if (!CodeGenOpts.SimplifyLibCalls)
      FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
    if (!CodeGenOpts.TrapFuncName.empty())
      FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
  } else {
    // Attributes that should go on the function, but not the call site.
    if (!CodeGenOpts.DisableFPElim) {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
    } else if (CodeGenOpts.OmitLeafFramePointer) {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
      FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
    } else {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "true");
      FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
    }

    FuncAttrs.addAttribute("disable-tail-calls",
                           llvm::toStringRef(CodeGenOpts.DisableTailCalls));
    FuncAttrs.addAttribute("less-precise-fpmad",
                           llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
    FuncAttrs.addAttribute("no-infs-fp-math",
                           llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
    FuncAttrs.addAttribute("no-nans-fp-math",
                           llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
    FuncAttrs.addAttribute("unsafe-fp-math",
                           llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
    FuncAttrs.addAttribute("use-soft-float",
                           llvm::toStringRef(CodeGenOpts.SoftFloat));
    FuncAttrs.addAttribute("stack-protector-buffer-size",
                           llvm::utostr(CodeGenOpts.SSPBufferSize));

    if (!CodeGenOpts.StackRealignment)
      FuncAttrs.addAttribute("no-realign-stack");

    // Add target-cpu and target-features attributes to functions. If
    // we have a decl for the function and it has a target attribute then
    // parse that and add it to the feature set.
    StringRef TargetCPU = getTarget().getTargetOpts().CPU;

    // TODO: Features gets us the features on the command line including
    // feature dependencies. For canonicalization purposes we might want to
    // avoid putting features in the target-features set if we know it'll be
    // one of the default features in the backend, e.g. corei7-avx and +avx or
    // figure out non-explicit dependencies.
    // Canonicalize the existing features in a new feature map.
    // TODO: Migrate the existing backends to keep the map around rather than
    // the string.
    llvm::StringMap<bool> FeatureMap;
    for (auto F : getTarget().getTargetOpts().Features) {
      const char *Name = F.c_str();
      bool Enabled = Name[0] == '+';
      getTarget().setFeatureEnabled(FeatureMap, Name + 1, Enabled);
    }

    const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl);
    if (FD) {
      if (const auto *TD = FD->getAttr<TargetAttr>()) {
        StringRef FeaturesStr = TD->getFeatures();
        SmallVector<StringRef, 1> AttrFeatures;
        FeaturesStr.split(AttrFeatures, ",");

        // Grab the various features, prepending a "+" to turn each one on in
        // the backend, and add them to our existing set of features.
        for (auto &Feature : AttrFeatures) {
          // Go ahead and trim whitespace rather than either erroring or
          // accepting it weirdly.
          Feature = Feature.trim();

          // While we're here iterating, check for a different target cpu.
          if (Feature.startswith("arch="))
            TargetCPU = Feature.split("=").second.trim();
          else if (Feature.startswith("tune="))
            // We don't support cpu tuning this way currently.
            ;
          else if (Feature.startswith("fpmath="))
            // TODO: Support the fpmath option this way. It will require
            // checking overall feature validity for the function with the
            // rest of the attributes on the function.
            ;
          else if (Feature.startswith("mno-"))
            getTarget().setFeatureEnabled(FeatureMap, Feature.split("-").second,
                                          false);
          else
            getTarget().setFeatureEnabled(FeatureMap, Feature, true);
        }
      }
    }
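
    // For example, '__attribute__((target("sse4.2,arch=atom")))' enables the
    // sse4.2 feature in FeatureMap and switches TargetCPU to "atom" for this
    // function.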

    // Produce the canonical string for this set of features.
    std::vector<std::string> Features;
    for (llvm::StringMap<bool>::const_iterator it = FeatureMap.begin(),
                                               ie = FeatureMap.end();
         it != ie; ++it)
      Features.push_back((it->second ? "+" : "-") + it->first().str());

    // Now add the target-cpu and target-features to the function.
    if (!TargetCPU.empty())
      FuncAttrs.addAttribute("target-cpu", TargetCPU);
    if (!Features.empty()) {
      std::sort(Features.begin(), Features.end());
      FuncAttrs.addAttribute("target-features",
                             llvm::join(Features.begin(), Features.end(), ","));
    }
  }

  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);

  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Extend:
    if (RetTy->hasSignedIntegerRepresentation())
      RetAttrs.addAttribute(llvm::Attribute::SExt);
    else if (RetTy->hasUnsignedIntegerRepresentation())
      RetAttrs.addAttribute(llvm::Attribute::ZExt);
    // FALL THROUGH
  case ABIArgInfo::Direct:
    if (RetAI.getInReg())
      RetAttrs.addAttribute(llvm::Attribute::InReg);
    break;
  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::InAlloca:
  case ABIArgInfo::Indirect: {
    // inalloca and sret disable readnone and readonly
    FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
        .removeAttribute(llvm::Attribute::ReadNone);
    break;
  }

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }
1593 if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
1594 QualType PTy = RefTy->getPointeeType();
1595 if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
1596 RetAttrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
1598 else if (getContext().getTargetAddressSpace(PTy) == 0)
1599 RetAttrs.addAttribute(llvm::Attribute::NonNull);
  // Attach return attributes.
  if (RetAttrs.hasAttributes()) {
    PAL.push_back(llvm::AttributeSet::get(
        getLLVMContext(), llvm::AttributeSet::ReturnIndex, RetAttrs));
  }
  // Attach attributes to sret.
  if (IRFunctionArgs.hasSRetArg()) {
    llvm::AttrBuilder SRETAttrs;
    SRETAttrs.addAttribute(llvm::Attribute::StructRet);
    if (RetAI.getInReg())
      SRETAttrs.addAttribute(llvm::Attribute::InReg);
    PAL.push_back(llvm::AttributeSet::get(
        getLLVMContext(), IRFunctionArgs.getSRetArgNo() + 1, SRETAttrs));
  }

  // Attach attributes to inalloca argument.
  if (IRFunctionArgs.hasInallocaArg()) {
    llvm::AttrBuilder Attrs;
    Attrs.addAttribute(llvm::Attribute::InAlloca);
    PAL.push_back(llvm::AttributeSet::get(
        getLLVMContext(), IRFunctionArgs.getInallocaArgNo() + 1, Attrs));
  }

  unsigned ArgNo = 0;
  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
                                          E = FI.arg_end();
       I != E; ++I, ++ArgNo) {
    QualType ParamType = I->type;
    const ABIArgInfo &AI = I->info;
    llvm::AttrBuilder Attrs;

    // Add attribute for padding argument, if necessary.
    if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
      if (AI.getPaddingInReg())
        PAL.push_back(llvm::AttributeSet::get(
            getLLVMContext(), IRFunctionArgs.getPaddingArgNo(ArgNo) + 1,
            llvm::Attribute::InReg));
    }

    // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
    // have the corresponding parameter variable. It doesn't make
    // sense to do it here because parameters are so messed up.
    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
      if (ParamType->isSignedIntegerOrEnumerationType())
        Attrs.addAttribute(llvm::Attribute::SExt);
      else if (ParamType->isUnsignedIntegerOrEnumerationType()) {
        if (getTypes().getABIInfo().shouldSignExtUnsignedType(ParamType))
          Attrs.addAttribute(llvm::Attribute::SExt);
        else
          Attrs.addAttribute(llvm::Attribute::ZExt);
      }
      // FALL THROUGH
    case ABIArgInfo::Direct:
      if (ArgNo == 0 && FI.isChainCall())
        Attrs.addAttribute(llvm::Attribute::Nest);
      else if (AI.getInReg())
        Attrs.addAttribute(llvm::Attribute::InReg);
      break;

    case ABIArgInfo::Indirect:
      if (AI.getInReg())
        Attrs.addAttribute(llvm::Attribute::InReg);

      if (AI.getIndirectByVal())
        Attrs.addAttribute(llvm::Attribute::ByVal);

      Attrs.addAlignmentAttr(AI.getIndirectAlign());

      // byval disables readnone and readonly.
      FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
          .removeAttribute(llvm::Attribute::ReadNone);
      break;

    case ABIArgInfo::Ignore:
    case ABIArgInfo::Expand:
      continue;

    case ABIArgInfo::InAlloca:
      // inalloca disables readnone and readonly.
      FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
          .removeAttribute(llvm::Attribute::ReadNone);
      continue;
    }

    if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
      QualType PTy = RefTy->getPointeeType();
      if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
        Attrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
                                         .getQuantity());
      else if (getContext().getTargetAddressSpace(PTy) == 0)
        Attrs.addAttribute(llvm::Attribute::NonNull);
    }

    if (Attrs.hasAttributes()) {
      unsigned FirstIRArg, NumIRArgs;
      std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
      for (unsigned i = 0; i < NumIRArgs; i++)
        PAL.push_back(llvm::AttributeSet::get(getLLVMContext(),
                                              FirstIRArg + i + 1, Attrs));
    }
  }
  assert(ArgNo == FI.arg_size());

  if (FuncAttrs.hasAttributes())
    PAL.push_back(llvm::AttributeSet::get(getLLVMContext(),
                                          llvm::AttributeSet::FunctionIndex,
                                          FuncAttrs));
}
/// An argument came in as a promoted argument; demote it back to its
/// declared type.
static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
                                         const VarDecl *var,
                                         llvm::Value *value) {
  llvm::Type *varType = CGF.ConvertType(var->getType());

  // This can happen with promotions that actually don't change the
  // underlying type, like the enum promotions.
  if (value->getType() == varType) return value;

  assert((varType->isIntegerTy() || varType->isFloatingPointTy())
         && "unexpected promotion type");

  if (isa<llvm::IntegerType>(varType))
    return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");

  return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
}
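// For example, given the K&R definition
//   int f(c) char c; { return c; }
// the caller passes 'c' promoted to 'int', and emitArgumentDemotion truncates
// the incoming i32 back to i8 in the prologue (a sketch, assuming 32-bit int).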
/// Returns the attribute (either parameter attribute, or function
/// attribute), which declares argument ArgNo to be non-null.
static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
                                         QualType ArgType, unsigned ArgNo) {
  // FIXME: __attribute__((nonnull)) can also be applied to:
  //   - references to pointers, where the pointee is known to be
  //     nonnull (apparently a Clang extension)
  //   - transparent unions containing pointers
  // In the former case, LLVM IR cannot represent the constraint. In
  // the latter case, we have no guarantee that the transparent union
  // is in fact passed as a pointer.
  if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
    return nullptr;
  // First, check attribute on parameter itself.
  if (PVD) {
    if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
      return ParmNNAttr;
  }
  // Check function attributes.
  if (!FD)
    return nullptr;
  for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
    if (NNAttr->isNonNull(ArgNo))
      return NNAttr;
  }
  return nullptr;
}
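// Both spellings are handled above; for example:
//   void f(int *p) __attribute__((nonnull(1)));  // function attribute
//   void g(int *p __attribute__((nonnull)));     // parameter attribute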
void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
                                         llvm::Function *Fn,
                                         const FunctionArgList &Args) {
  if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
    // Naked functions don't have prologues.
    return;

  // If this is an implicit-return-zero function, go ahead and
  // initialize the return value. TODO: it might be nice to have
  // a more general mechanism for this that didn't require synthesized
  // return statements.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
    if (FD->hasImplicitReturnZero()) {
      QualType RetTy = FD->getReturnType().getUnqualifiedType();
      llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
      llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
      Builder.CreateStore(Zero, ReturnValue);
    }
  }

  // FIXME: We no longer need the types from FunctionArgList; lift up and
  // simplify.

  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
  // Flattened function arguments.
  SmallVector<llvm::Argument *, 16> FnArgs;
  FnArgs.reserve(IRFunctionArgs.totalIRArgs());
  for (auto &Arg : Fn->args()) {
    FnArgs.push_back(&Arg);
  }
  assert(FnArgs.size() == IRFunctionArgs.totalIRArgs());
  // If we're using inalloca, all the memory arguments are GEPs off of the last
  // parameter, which is a pointer to the complete memory area.
  llvm::Value *ArgStruct = nullptr;
  if (IRFunctionArgs.hasInallocaArg()) {
    ArgStruct = FnArgs[IRFunctionArgs.getInallocaArgNo()];
    assert(ArgStruct->getType() == FI.getArgStruct()->getPointerTo());
  }

  // Name the struct return parameter.
  if (IRFunctionArgs.hasSRetArg()) {
    auto AI = FnArgs[IRFunctionArgs.getSRetArgNo()];
    AI->setName("agg.result");
    AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), AI->getArgNo() + 1,
                                        llvm::Attribute::NoAlias));
  }

  // Track if we received the parameter as a pointer (indirect, byval, or
  // inalloca). If we already have a pointer, EmitParmDecl doesn't need to copy
  // it into a local alloca for us.
  enum ValOrPointer { HaveValue = 0, HavePointer = 1 };
  typedef llvm::PointerIntPair<llvm::Value *, 1> ValueAndIsPtr;
  SmallVector<ValueAndIsPtr, 16> ArgVals;
  ArgVals.reserve(Args.size());

  // Create a pointer value for every parameter declaration. This usually
  // entails copying one or more LLVM IR arguments into an alloca. Don't push
  // any cleanups or do anything that might unwind. We do that separately, so
  // we can push the cleanups in the correct order for the ABI.
  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  unsigned ArgNo = 0;
  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it, ++ArgNo) {
    const VarDecl *Arg = *i;
    QualType Ty = info_it->type;
    const ABIArgInfo &ArgI = info_it->info;

    bool isPromoted =
        isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

    switch (ArgI.getKind()) {
    case ABIArgInfo::InAlloca: {
      assert(NumIRArgs == 0);
      llvm::Value *V =
          Builder.CreateStructGEP(FI.getArgStruct(), ArgStruct,
                                  ArgI.getInAllocaFieldIndex(), Arg->getName());
      ArgVals.push_back(ValueAndIsPtr(V, HavePointer));
      break;
    }

    case ABIArgInfo::Indirect: {
      assert(NumIRArgs == 1);
      llvm::Value *V = FnArgs[FirstIRArg];

      if (!hasScalarEvaluationKind(Ty)) {
        // Aggregates and complex variables are accessed by reference. All we
        // need to do is realign the value, if requested.
        if (ArgI.getIndirectRealign()) {
          llvm::Value *AlignedTemp = CreateMemTemp(Ty, "coerce");

          // Copy from the incoming argument pointer to the temporary with the
          // appropriate alignment.
          //
          // FIXME: We should have a common utility for generating an aggregate
          // copy.
          llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
          CharUnits Size = getContext().getTypeSizeInChars(Ty);
          llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
          llvm::Value *Src = Builder.CreateBitCast(V, I8PtrTy);
          Builder.CreateMemCpy(Dst, Src,
                               llvm::ConstantInt::get(IntPtrTy,
                                                      Size.getQuantity()),
                               ArgI.getIndirectAlign(),
                               /*isVolatile=*/false);
          V = AlignedTemp;
        }
        ArgVals.push_back(ValueAndIsPtr(V, HavePointer));
      } else {
        // Load scalar value from indirect argument.
        V = EmitLoadOfScalar(V, false, ArgI.getIndirectAlign(), Ty,
                             Arg->getLocStart());

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
        ArgVals.push_back(ValueAndIsPtr(V, HaveValue));
      }
      break;
    }
    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {

      // If we have the trivial case, handle it with no muss and fuss.
      if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
          ArgI.getCoerceToType() == ConvertType(Ty) &&
          ArgI.getDirectOffset() == 0) {
        assert(NumIRArgs == 1);
        auto AI = FnArgs[FirstIRArg];
        llvm::Value *V = AI;

        if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
          if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
                             PVD->getFunctionScopeIndex()))
            AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
                                                AI->getArgNo() + 1,
                                                llvm::Attribute::NonNull));

          QualType OTy = PVD->getOriginalType();
          if (const auto *ArrTy =
              getContext().getAsConstantArrayType(OTy)) {
            // A C99 array parameter declaration with the static keyword also
            // indicates dereferenceability, and if the size is constant we can
            // use the dereferenceable attribute (which requires the size in
            // bytes).
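            // For example, on a target with 4-byte int, the pointer coming in
            // for 'void f(int a[static 3])' can be marked dereferenceable(12).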
            if (ArrTy->getSizeModifier() == ArrayType::Static) {
              QualType ETy = ArrTy->getElementType();
              uint64_t ArrSize = ArrTy->getSize().getZExtValue();
              if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
                  ArrSize) {
                llvm::AttrBuilder Attrs;
                Attrs.addDereferenceableAttr(
                    getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize);
                AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
                                                    AI->getArgNo() + 1, Attrs));
              } else if (getContext().getTargetAddressSpace(ETy) == 0) {
                AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
                                                    AI->getArgNo() + 1,
                                                    llvm::Attribute::NonNull));
              }
            }
          } else if (const auto *ArrTy =
                         getContext().getAsVariableArrayType(OTy)) {
            // For C99 VLAs with the static keyword, we don't know the size so
            // we can't use the dereferenceable attribute, but in addrspace(0)
            // we know that it must be nonnull.
            if (ArrTy->getSizeModifier() == VariableArrayType::Static &&
                !getContext().getTargetAddressSpace(ArrTy->getElementType()))
              AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
                                                  AI->getArgNo() + 1,
                                                  llvm::Attribute::NonNull));
          }
          const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
          if (!AVAttr)
            if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
              AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
          if (AVAttr) {
            llvm::Value *AlignmentValue =
                EmitScalarExpr(AVAttr->getAlignment());
            llvm::ConstantInt *AlignmentCI =
                cast<llvm::ConstantInt>(AlignmentValue);
            unsigned Alignment =
                std::min((unsigned)AlignmentCI->getZExtValue(),
                         +llvm::Value::MaximumAlignment);

            llvm::AttrBuilder Attrs;
            Attrs.addAlignmentAttr(Alignment);
            AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
                                                AI->getArgNo() + 1, Attrs));
          }
        }

        if (Arg->getType().isRestrictQualified())
          AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
                                              AI->getArgNo() + 1,
                                              llvm::Attribute::NoAlias));

        // Ensure the argument is the correct type.
        if (V->getType() != ArgI.getCoerceToType())
          V = Builder.CreateBitCast(V, ArgI.getCoerceToType());

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);

        if (const CXXMethodDecl *MD =
                dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl)) {
          if (MD->isVirtual() && Arg == CXXABIThisDecl)
            V = CGM.getCXXABI().
                adjustThisParameterInVirtualFunctionPrologue(*this, CurGD, V);
        }

        // Because of merging of function types from multiple decls it is
        // possible for the type of an argument to not match the corresponding
        // type in the function type. Since we are codegening the callee
        // in here, add a cast to the argument type.
        llvm::Type *LTy = ConvertType(Arg->getType());
        if (V->getType() != LTy)
          V = Builder.CreateBitCast(V, LTy);

        ArgVals.push_back(ValueAndIsPtr(V, HaveValue));
        break;
      }
      llvm::AllocaInst *Alloca = CreateMemTemp(Ty, Arg->getName());

      // The alignment we need to use is the max of the requested alignment for
      // the argument plus the alignment required by our access code below.
      unsigned AlignmentToUse =
          CGM.getDataLayout().getABITypeAlignment(ArgI.getCoerceToType());
      AlignmentToUse = std::max(AlignmentToUse,
                     (unsigned)getContext().getDeclAlign(Arg).getQuantity());

      Alloca->setAlignment(AlignmentToUse);
      llvm::Value *V = Alloca;
      llvm::Value *Ptr = V; // Pointer to store into.
      CharUnits PtrAlign = CharUnits::fromQuantity(AlignmentToUse);

      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = ArgI.getDirectOffset()) {
        Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy());
        Ptr = Builder.CreateConstGEP1_32(Builder.getInt8Ty(), Ptr, Offs);
        Ptr = Builder.CreateBitCast(Ptr,
                        llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
        PtrAlign = PtrAlign.alignmentAtOffset(CharUnits::fromQuantity(Offs));
      }

      // Fast-isel and the optimizer generally like scalar values better than
      // FCAs, so we flatten them if this is safe to do for this argument.
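      // For example, an argument coerced to { double, double } arrives as two
      // separate double IR arguments, which are stored back into the alloca
      // element by element below.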
      llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
      if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
          STy->getNumElements() > 1) {
        uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
        llvm::Type *DstTy =
            cast<llvm::PointerType>(Ptr->getType())->getElementType();
        uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);

        if (SrcSize <= DstSize) {
          Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));

          assert(STy->getNumElements() == NumIRArgs);
          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
            auto AI = FnArgs[FirstIRArg + i];
            AI->setName(Arg->getName() + ".coerce" + Twine(i));
            llvm::Value *EltPtr = Builder.CreateConstGEP2_32(STy, Ptr, 0, i);
            Builder.CreateStore(AI, EltPtr);
          }
        } else {
          llvm::AllocaInst *TempAlloca =
              CreateTempAlloca(ArgI.getCoerceToType(), "coerce");
          TempAlloca->setAlignment(AlignmentToUse);
          llvm::Value *TempV = TempAlloca;

          assert(STy->getNumElements() == NumIRArgs);
          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
            auto AI = FnArgs[FirstIRArg + i];
            AI->setName(Arg->getName() + ".coerce" + Twine(i));
            llvm::Value *EltPtr =
                Builder.CreateConstGEP2_32(ArgI.getCoerceToType(), TempV, 0, i);
            Builder.CreateStore(AI, EltPtr);
          }

          Builder.CreateMemCpy(Ptr, TempV, DstSize, AlignmentToUse);
        }
      } else {
        // Simple case, just do a coerced store of the argument into the alloca.
        assert(NumIRArgs == 1);
        auto AI = FnArgs[FirstIRArg];
        AI->setName(Arg->getName() + ".coerce");
        CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, PtrAlign, *this);
      }

      // Match to what EmitParmDecl is expecting for this type.
      if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
        V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty, Arg->getLocStart());
        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
        ArgVals.push_back(ValueAndIsPtr(V, HaveValue));
      } else {
        ArgVals.push_back(ValueAndIsPtr(V, HavePointer));
      }
      break;
    }
    case ABIArgInfo::Expand: {
      // If this structure was expanded into multiple arguments then
      // we need to create a temporary and reconstruct it from the
      // arguments.
      llvm::AllocaInst *Alloca = CreateMemTemp(Ty);
      CharUnits Align = getContext().getDeclAlign(Arg);
      Alloca->setAlignment(Align.getQuantity());
      LValue LV = MakeAddrLValue(Alloca, Ty, Align);
      ArgVals.push_back(ValueAndIsPtr(Alloca, HavePointer));

      auto FnArgIter = FnArgs.begin() + FirstIRArg;
      ExpandTypeFromArgs(Ty, LV, FnArgIter);
      assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs);
      for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
        auto AI = FnArgs[FirstIRArg + i];
        AI->setName(Arg->getName() + "." + Twine(i));
      }
      break;
    }

    case ABIArgInfo::Ignore:
      assert(NumIRArgs == 0);
      // Initialize the local variable appropriately.
      if (!hasScalarEvaluationKind(Ty)) {
        ArgVals.push_back(ValueAndIsPtr(CreateMemTemp(Ty), HavePointer));
      } else {
        llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
        ArgVals.push_back(ValueAndIsPtr(U, HaveValue));
      }
      break;
    }
  }

  if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
    for (int I = Args.size() - 1; I >= 0; --I)
      EmitParmDecl(*Args[I], ArgVals[I].getPointer(), ArgVals[I].getInt(),
                   I + 1);
  } else {
    for (unsigned I = 0, E = Args.size(); I != E; ++I)
      EmitParmDecl(*Args[I], ArgVals[I].getPointer(), ArgVals[I].getInt(),
                   I + 1);
  }
}
static void eraseUnusedBitCasts(llvm::Instruction *insn) {
  while (insn->use_empty()) {
    llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
    if (!bitcast) return;

    // This is "safe" because we would have used a ConstantExpr otherwise.
    insn = cast<llvm::Instruction>(bitcast->getOperand(0));
    bitcast->eraseFromParent();
  }
}
/// Try to emit a fused autorelease of a return result.
static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
                                                    llvm::Value *result) {
  // We must be immediately following the cast.
  llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
  if (BB->empty()) return nullptr;
  if (&BB->back() != result) return nullptr;

  llvm::Type *resultType = result->getType();

  // result is in a BasicBlock and is therefore an Instruction.
  llvm::Instruction *generator = cast<llvm::Instruction>(result);

  SmallVector<llvm::Instruction*,4> insnsToKill;

  // Look for:
  //   %generator = bitcast %type1* %generator2 to %type2*
  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
    // We would have emitted this as a constant if the operand weren't
    // an Instruction.
    generator = cast<llvm::Instruction>(bitcast->getOperand(0));

    // Require the generator to be immediately followed by the cast.
    if (generator->getNextNode() != bitcast)
      return nullptr;

    insnsToKill.push_back(bitcast);
  }

  // Look for:
  //   %generator = call i8* @objc_retain(i8* %originalResult)
  // or
  //   %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
  llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
  if (!call) return nullptr;

  bool doRetainAutorelease;

  if (call->getCalledValue() == CGF.CGM.getARCEntrypoints().objc_retain) {
    doRetainAutorelease = true;
  } else if (call->getCalledValue() == CGF.CGM.getARCEntrypoints()
                                          .objc_retainAutoreleasedReturnValue) {
    doRetainAutorelease = false;

    // If we emitted an assembly marker for this call (and the
    // ARCEntrypoints field should have been set if so), go looking
    // for that call. If we can't find it, we can't do this
    // optimization. But it should always be the immediately previous
    // instruction, unless we needed bitcasts around the call.
    if (CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker) {
      llvm::Instruction *prev = call->getPrevNode();
      assert(prev);
      if (isa<llvm::BitCastInst>(prev)) {
        prev = prev->getPrevNode();
        assert(prev);
      }
      assert(isa<llvm::CallInst>(prev));
      assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
                 CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker);
      insnsToKill.push_back(prev);
    }
  } else {
    return nullptr;
  }

  result = call->getArgOperand(0);
  insnsToKill.push_back(call);

  // Keep killing bitcasts, for sanity. Note that we no longer care
  // about precise ordering as long as there's exactly one use.
  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
    if (!bitcast->hasOneUse()) break;
    insnsToKill.push_back(bitcast);
    result = bitcast->getOperand(0);
  }

  // Delete all the unnecessary instructions, from latest to earliest.
  for (SmallVectorImpl<llvm::Instruction*>::iterator
         i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i)
    (*i)->eraseFromParent();

  // Do the fused retain/autorelease if we were asked to.
  if (doRetainAutorelease)
    result = CGF.EmitARCRetainAutoreleaseReturnValue(result);

  // Cast back to the result type.
  return CGF.Builder.CreateBitCast(result, resultType);
}
/// If this is a +1 of the value of an immutable 'self', remove it.
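/// For example, under ARC a method like
///   - (id)foo { return self; }
/// would otherwise retain 'self' just to return it; when 'self' is immutable,
/// this peephole deletes that retain.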
static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
                                          llvm::Value *result) {
  // This is only applicable to a method with an immutable 'self'.
  const ObjCMethodDecl *method =
      dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
  if (!method) return nullptr;
  const VarDecl *self = method->getSelfDecl();
  if (!self->getType().isConstQualified()) return nullptr;

  // Look for a retain call.
  llvm::CallInst *retainCall =
      dyn_cast<llvm::CallInst>(result->stripPointerCasts());
  if (!retainCall ||
      retainCall->getCalledValue() != CGF.CGM.getARCEntrypoints().objc_retain)
    return nullptr;

  // Look for an ordinary load of 'self'.
  llvm::Value *retainedValue = retainCall->getArgOperand(0);
  llvm::LoadInst *load =
      dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
  if (!load || load->isAtomic() || load->isVolatile() ||
      load->getPointerOperand() != CGF.GetAddrOfLocalVar(self))
    return nullptr;

  // Okay! Burn it all down. This relies for correctness on the
  // assumption that the retain is emitted as part of the return and
  // that thereafter everything is used "linearly".
  llvm::Type *resultType = result->getType();
  eraseUnusedBitCasts(cast<llvm::Instruction>(result));
  assert(retainCall->use_empty());
  retainCall->eraseFromParent();
  eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));

  return CGF.Builder.CreateBitCast(load, resultType);
}
/// Emit an ARC autorelease of the result of a function.
///
/// \return the value to actually return from the function
static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
                                            llvm::Value *result) {
  // If we're returning 'self', kill the initial retain. This is a
  // heuristic attempt to "encourage correctness" in the really unfortunate
  // case where we have a return of self during a dealloc and we desperately
  // need to avoid the possible autorelease.
  if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
    return self;

  // At -O0, try to emit a fused retain/autorelease.
  if (CGF.shouldUseFusedARCCalls())
    if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
      return fused;

  return CGF.EmitARCAutoreleaseReturnValue(result);
}
/// Heuristically search for a dominating store to the return-value slot.
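/// This is what lets the common epilogue shape (an illustrative sketch, not
/// verbatim output)
///   store i32 %x, i32* %retval
///   %r = load i32, i32* %retval
///   ret i32 %r
/// collapse into returning %x directly, usually erasing the alloca as well.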
static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
  // If there are multiple uses of the return-value slot, just check
  // for something immediately preceding the IP. Sometimes this can
  // happen with how we generate implicit-returns; it can also happen
  // with noreturn cleanups.
  if (!CGF.ReturnValue->hasOneUse()) {
    llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
    if (IP->empty()) return nullptr;
    llvm::Instruction *I = &IP->back();

    // Skip lifetime markers.
    for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
                                            IE = IP->rend();
         II != IE; ++II) {
      if (llvm::IntrinsicInst *Intrinsic =
              dyn_cast<llvm::IntrinsicInst>(&*II)) {
        if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
          const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
          ++II;
          if (II == IE)
            break;
          if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
            continue;
        }
      }
      I = &*II;
      break;
    }

    llvm::StoreInst *store = dyn_cast<llvm::StoreInst>(I);
    if (!store) return nullptr;
    if (store->getPointerOperand() != CGF.ReturnValue) return nullptr;
    assert(!store->isAtomic() && !store->isVolatile()); // see below
    return store;
  }

  llvm::StoreInst *store =
      dyn_cast<llvm::StoreInst>(CGF.ReturnValue->user_back());
  if (!store) return nullptr;

  // These aren't actually possible for non-coerced returns, and we
  // only care about non-coerced returns on this code path.
  assert(!store->isAtomic() && !store->isVolatile());

  // Now do a quick-and-dirty dominance check: just walk up the
  // single-predecessors chain from the current insertion point.
  llvm::BasicBlock *StoreBB = store->getParent();
  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
  while (IP != StoreBB) {
    if (!(IP = IP->getSinglePredecessor()))
      return nullptr;
  }

  // Okay, the store's basic block dominates the insertion point; we
  // can do our thing.
  return store;
}
void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
                                         bool EmitRetDbgLoc,
                                         SourceLocation EndLoc) {
  if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
    // Naked functions don't have epilogues.
    Builder.CreateUnreachable();
    return;
  }

  // Functions with no result always return void.
  if (!ReturnValue) {
    Builder.CreateRetVoid();
    return;
  }

  llvm::DebugLoc RetDbgLoc;
  llvm::Value *RV = nullptr;
  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  switch (RetAI.getKind()) {
  case ABIArgInfo::InAlloca:
    // Aggregates get evaluated directly into the destination. Sometimes we
    // need to return the sret value in a register, though.
    assert(hasAggregateEvaluationKind(RetTy));
    if (RetAI.getInAllocaSRet()) {
      llvm::Function::arg_iterator EI = CurFn->arg_end();
      --EI;
      llvm::Value *ArgStruct = EI;
      llvm::Value *SRet = Builder.CreateStructGEP(
          nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
      RV = Builder.CreateLoad(SRet, "sret");
    }
    break;
  case ABIArgInfo::Indirect: {
    auto AI = CurFn->arg_begin();
    if (RetAI.isSRetAfterThis())
      ++AI;
    switch (getEvaluationKind(RetTy)) {
    case TEK_Complex: {
      ComplexPairTy RT =
          EmitLoadOfComplex(MakeNaturalAlignAddrLValue(ReturnValue, RetTy),
                            EndLoc);
      EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(AI, RetTy),
                         /*isInit*/ true);
      break;
    }
    case TEK_Aggregate:
      // Do nothing; aggregates get evaluated directly into the destination.
      break;
    case TEK_Scalar:
      EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
                        MakeNaturalAlignAddrLValue(AI, RetTy),
                        /*isInit*/ true);
      break;
    }
    break;
  }
  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
        RetAI.getDirectOffset() == 0) {
      // The internal return value temp will always have
      // pointer-to-return-type type; just do a load.

      // If there is a dominating store to ReturnValue, we can elide
      // the load, zap the store, and usually zap the alloca.
      if (llvm::StoreInst *SI =
              findDominatingStoreToReturnValue(*this)) {
        // Reuse the debug location from the store unless there is
        // cleanup code to be emitted between the store and return
        // statement.
        if (EmitRetDbgLoc && !AutoreleaseResult)
          RetDbgLoc = SI->getDebugLoc();
        // Get the stored value and nuke the now-dead store.
        RV = SI->getValueOperand();
        SI->eraseFromParent();

        // If that was the only use of the return value, nuke it as well now.
        if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
          cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
          ReturnValue = nullptr;
        }

      // Otherwise, we have to do a simple load.
      } else {
        RV = Builder.CreateLoad(ReturnValue);
      }
    } else {
      llvm::Value *V = ReturnValue;
      CharUnits Align = getContext().getTypeAlignInChars(RetTy);
      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = RetAI.getDirectOffset()) {
        V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
        V = Builder.CreateConstGEP1_32(Builder.getInt8Ty(), V, Offs);
        V = Builder.CreateBitCast(V,
                        llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
        Align = Align.alignmentAtOffset(CharUnits::fromQuantity(Offs));
      }

      RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), Align, *this);
    }

    // In ARC, end functions that return a retainable type with a call
    // to objc_autoreleaseReturnValue.
    if (AutoreleaseResult) {
      assert(getLangOpts().ObjCAutoRefCount &&
             !FI.isReturnsRetained() &&
             RetTy->isObjCRetainableType());
      RV = emitAutoreleaseOfResult(*this, RV);
    }

    break;
  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  llvm::Instruction *Ret;
  if (RV) {
    if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute)) {
      if (auto RetNNAttr = CurGD.getDecl()->getAttr<ReturnsNonNullAttr>()) {
        SanitizerScope SanScope(this);
        llvm::Value *Cond = Builder.CreateICmpNE(
            RV, llvm::Constant::getNullValue(RV->getType()));
        llvm::Constant *StaticData[] = {
            EmitCheckSourceLocation(EndLoc),
            EmitCheckSourceLocation(RetNNAttr->getLocation()),
        };
        EmitCheck(std::make_pair(Cond, SanitizerKind::ReturnsNonnullAttribute),
                  "nonnull_return", StaticData, None);
      }
    }
    Ret = Builder.CreateRet(RV);
  } else {
    Ret = Builder.CreateRetVoid();
  }

  if (RetDbgLoc)
    Ret->setDebugLoc(std::move(RetDbgLoc));
}
static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
  const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
  return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
}

static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, QualType Ty) {
  // FIXME: Generate IR in one pass, rather than going back and fixing up these
  // placeholders.
  llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
  llvm::Value *Placeholder =
      llvm::UndefValue::get(IRTy->getPointerTo()->getPointerTo());
  Placeholder = CGF.Builder.CreateLoad(Placeholder);
  return AggValueSlot::forAddr(Placeholder, CharUnits::Zero(),
                               Qualifiers(),
                               AggValueSlot::IsNotDestructed,
                               AggValueSlot::DoesNotNeedGCBarriers,
                               AggValueSlot::IsNotAliased);
}
void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
                                          const VarDecl *param,
                                          SourceLocation loc) {
  // StartFunction converted the ABI-lowered parameter(s) into a
  // local alloca. We need to turn that into an r-value suitable
  // for EmitCall.
  llvm::Value *local = GetAddrOfLocalVar(param);

  QualType type = param->getType();

  // For the most part, we just need to load the alloca, except:
  // 1) aggregate r-values are actually pointers to temporaries, and
  // 2) references to non-scalars are pointers directly to the aggregate.
  // I don't know why references to scalars are different here.
  if (const ReferenceType *ref = type->getAs<ReferenceType>()) {
    if (!hasScalarEvaluationKind(ref->getPointeeType()))
      return args.add(RValue::getAggregate(local), type);

    // Locals which are references to scalars are represented
    // with allocas holding the pointer.
    return args.add(RValue::get(Builder.CreateLoad(local)), type);
  }

  assert(!isInAllocaArgument(CGM.getCXXABI(), type) &&
         "cannot emit delegate call arguments for inalloca arguments!");

  args.add(convertTempToRValue(local, type, loc), type);
}
static bool isProvablyNull(llvm::Value *addr) {
  return isa<llvm::ConstantPointerNull>(addr);
}

static bool isProvablyNonNull(llvm::Value *addr) {
  return isa<llvm::AllocaInst>(addr);
}
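// Both predicates are deliberately shallow: a ConstantPointerNull is
// certainly null and a fresh alloca is certainly not, while anything else
// (e.g. a loaded pointer) is treated as unknown. That conservatism is what
// forces the conditional null checks in the writeback code below.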
/// Emit the actual writing-back of a writeback.
static void emitWriteback(CodeGenFunction &CGF,
                          const CallArgList::Writeback &writeback) {
  const LValue &srcLV = writeback.Source;
  llvm::Value *srcAddr = srcLV.getAddress();
  assert(!isProvablyNull(srcAddr) &&
         "shouldn't have writeback for provably null argument");

  llvm::BasicBlock *contBB = nullptr;

  // If the argument wasn't provably non-null, we need to null check
  // before doing the store.
  bool provablyNonNull = isProvablyNonNull(srcAddr);
  if (!provablyNonNull) {
    llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
    contBB = CGF.createBasicBlock("icr.done");

    llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
    CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
    CGF.EmitBlock(writebackBB);
  }

  // Load the value to write back.
  llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);

  // Cast it back, in case we're writing an id to a Foo* or something.
  value = CGF.Builder.CreateBitCast(value,
               cast<llvm::PointerType>(srcAddr->getType())->getElementType(),
                                    "icr.writeback-cast");

  // Perform the writeback.

  // If we have a "to use" value, it's something we need to emit a use
  // of. This has to be carefully threaded in: if it's done after the
  // release it's potentially undefined behavior (and the optimizer
  // will ignore it), and if it happens before the retain then the
  // optimizer could move the release there.
  if (writeback.ToUse) {
    assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);

    // Retain the new value. No need to block-copy here: the block's
    // being passed up the stack.
    value = CGF.EmitARCRetainNonBlock(value);

    // Emit the intrinsic use here.
    CGF.EmitARCIntrinsicUse(writeback.ToUse);

    // Load the old value (primitively).
    llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());

    // Put the new value in place (primitively).
    CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);

    // Release the old value.
    CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());

  // Otherwise, we can just do a normal lvalue store.
  } else {
    CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
  }

  // Jump to the continuation block.
  if (!provablyNonNull)
    CGF.EmitBlock(contBB);
}
static void emitWritebacks(CodeGenFunction &CGF,
                           const CallArgList &args) {
  for (const auto &I : args.writebacks())
    emitWriteback(CGF, I);
}

static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
                                            const CallArgList &CallArgs) {
  assert(CGF.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee());
  ArrayRef<CallArgList::CallArgCleanup> Cleanups =
      CallArgs.getCleanupsToDeactivate();
  // Iterate in reverse to increase the likelihood of popping the cleanup.
  for (ArrayRef<CallArgList::CallArgCleanup>::reverse_iterator
           I = Cleanups.rbegin(), E = Cleanups.rend(); I != E; ++I) {
    CGF.DeactivateCleanupBlock(I->Cleanup, I->IsActiveIP);
    I->IsActiveIP->eraseFromParent();
  }
}
static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
  if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
    if (uop->getOpcode() == UO_AddrOf)
      return uop->getSubExpr();
  return nullptr;
}
/// Emit an argument that's being passed call-by-writeback. That is,
/// we are passing the address of a temporary; after the call, the value
/// in that temporary is written back to the original l-value.
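///
/// For example (ObjC ARC, illustrative):
///   NSError *err;
///   [obj doThing:&err];   // parameter type NSError *__autoreleasing *
/// passes the address of a fresh temporary and, after the call returns,
/// copies the temporary back into 'err'.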
static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
                             const ObjCIndirectCopyRestoreExpr *CRE) {
  LValue srcLV;

  // Make an optimistic effort to emit the address as an l-value.
  // This can fail if the argument expression is more complicated.
  if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
    srcLV = CGF.EmitLValue(lvExpr);

  // Otherwise, just emit it as a scalar.
  } else {
    llvm::Value *srcAddr = CGF.EmitScalarExpr(CRE->getSubExpr());

    QualType srcAddrType =
        CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
    srcLV = CGF.MakeNaturalAlignAddrLValue(srcAddr, srcAddrType);
  }
  llvm::Value *srcAddr = srcLV.getAddress();

  // The dest and src types don't necessarily match in LLVM terms
  // because of the crazy ObjC compatibility rules.

  llvm::PointerType *destType =
      cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));

  // If the address is a constant null, just pass the appropriate null.
  if (isProvablyNull(srcAddr)) {
    args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
             CRE->getType());
    return;
  }

  // Create the temporary.
  llvm::Value *temp = CGF.CreateTempAlloca(destType->getElementType(),
                                           "icr.temp");
  // Loading an l-value can introduce a cleanup if the l-value is __weak,
  // and that cleanup will be conditional if we can't prove that the l-value
  // isn't null, so we need to register a dominating point so that the cleanups
  // system will make valid IR.
  CodeGenFunction::ConditionalEvaluation condEval(CGF);

  // Zero-initialize it if we're not doing a copy-initialization.
  bool shouldCopy = CRE->shouldCopy();
  if (!shouldCopy) {
    llvm::Value *null =
        llvm::ConstantPointerNull::get(
            cast<llvm::PointerType>(destType->getElementType()));
    CGF.Builder.CreateStore(null, temp);
  }

  llvm::BasicBlock *contBB = nullptr;
  llvm::BasicBlock *originBB = nullptr;

  // If the address is *not* known to be non-null, we need to switch.
  llvm::Value *finalArgument;

  bool provablyNonNull = isProvablyNonNull(srcAddr);
  if (provablyNonNull) {
    finalArgument = temp;
  } else {
    llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");

    finalArgument = CGF.Builder.CreateSelect(isNull,
                                   llvm::ConstantPointerNull::get(destType),
                                             temp, "icr.argument");

    // If we need to copy, then the load has to be conditional, which
    // means we need control flow.
    if (shouldCopy) {
      originBB = CGF.Builder.GetInsertBlock();
      contBB = CGF.createBasicBlock("icr.cont");
      llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
      CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
      CGF.EmitBlock(copyBB);
      condEval.begin(CGF);
    }
  }

  llvm::Value *valueToUse = nullptr;

  // Perform a copy if necessary.
  if (shouldCopy) {
    RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
    assert(srcRV.isScalar());

    llvm::Value *src = srcRV.getScalarVal();
    src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
                                    "icr.cast");

    // Use an ordinary store, not a store-to-lvalue.
    CGF.Builder.CreateStore(src, temp);

    // If optimization is enabled, and the value was held in a
    // __strong variable, we need to tell the optimizer that this
    // value has to stay alive until we're doing the store back.
    // This is because the temporary is effectively unretained,
    // and so otherwise we can violate the high-level semantics.
    if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
        srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
      valueToUse = src;
    }
  }

  // Finish the control flow if we needed it.
  if (shouldCopy && !provablyNonNull) {
    llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
    CGF.EmitBlock(contBB);

    // Make a phi for the value to intrinsically use.
    if (valueToUse) {
      llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
                                                      "icr.to-use");
      phiToUse->addIncoming(valueToUse, copyBB);
      phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
                            originBB);
      valueToUse = phiToUse;
    }

    condEval.end(CGF);
  }

  args.addWriteback(srcLV, temp, valueToUse);
  args.add(RValue::get(finalArgument), CRE->getType());
}
void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
  assert(!StackBase && !StackCleanup.isValid());

  // Save the stack.
  llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
  StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save");

  // Control gets really tied up in landing pads, so we have to spill the
  // stacksave to an alloca to avoid violating SSA form.
  // TODO: This is dead if we never emit the cleanup. We should create the
  // alloca and store lazily on the first cleanup emission.
  StackBaseMem = CGF.CreateTempAlloca(CGF.Int8PtrTy, "inalloca.spmem");
  CGF.Builder.CreateStore(StackBase, StackBaseMem);
  CGF.pushStackRestore(EHCleanup, StackBaseMem);
  StackCleanup = CGF.EHStack.getInnermostEHScope();
  assert(StackCleanup.isValid());
}

void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
  if (StackBase) {
    CGF.DeactivateCleanupBlock(StackCleanup, StackBase);
    llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
    // We could load StackBase from StackBaseMem, but in the non-exceptional
    // case we can skip it.
    CGF.Builder.CreateCall(F, StackBase);
  }
}
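// The emitted IR has roughly this shape (names are suggestive, not verbatim):
//   %inalloca.save = call i8* @llvm.stacksave()
//   %argmem = alloca inalloca <{ ... }>       ; packed argument block
//   call void @f(<{ ... }>* inalloca %argmem)
//   call void @llvm.stackrestore(i8* %inalloca.save)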
void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
                                          SourceLocation ArgLoc,
                                          const FunctionDecl *FD,
                                          unsigned ParmNum) {
  if (!SanOpts.has(SanitizerKind::NonnullAttribute) || !FD)
    return;
  auto PVD = ParmNum < FD->getNumParams() ? FD->getParamDecl(ParmNum) : nullptr;
  unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
  auto NNAttr = getNonNullAttr(FD, PVD, ArgType, ArgNo);
  if (!NNAttr)
    return;
  SanitizerScope SanScope(this);
  assert(RV.isScalar());
  llvm::Value *V = RV.getScalarVal();
  llvm::Value *Cond =
      Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
  llvm::Constant *StaticData[] = {
      EmitCheckSourceLocation(ArgLoc),
      EmitCheckSourceLocation(NNAttr->getLocation()),
      llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
  };
  EmitCheck(std::make_pair(Cond, SanitizerKind::NonnullAttribute),
            "nonnull_arg", StaticData, None);
}
void CodeGenFunction::EmitCallArgs(CallArgList &Args,
                                   ArrayRef<QualType> ArgTypes,
                                   CallExpr::const_arg_iterator ArgBeg,
                                   CallExpr::const_arg_iterator ArgEnd,
                                   const FunctionDecl *CalleeDecl,
                                   unsigned ParamsToSkip) {
  // We *have* to evaluate arguments from right to left in the MS C++ ABI,
  // because arguments are destroyed left to right in the callee.
  if (CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
    // Insert a stack save if we're going to need any inalloca args.
    bool HasInAllocaArgs = false;
    for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end();
         I != E && !HasInAllocaArgs; ++I)
      HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I);
    if (HasInAllocaArgs) {
      assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
      Args.allocateArgumentMemory(*this);
    }

    // Evaluate each argument.
    size_t CallArgsStart = Args.size();
    for (int I = ArgTypes.size() - 1; I >= 0; --I) {
      CallExpr::const_arg_iterator Arg = ArgBeg + I;
      EmitCallArg(Args, *Arg, ArgTypes[I]);
      EmitNonNullArgCheck(Args.back().RV, ArgTypes[I], Arg->getExprLoc(),
                          CalleeDecl, ParamsToSkip + I);
    }

    // Un-reverse the arguments we just evaluated so they match up with the
    // LLVM IR function.
    std::reverse(Args.begin() + CallArgsStart, Args.end());
    return;
  }

  for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
    CallExpr::const_arg_iterator Arg = ArgBeg + I;
    assert(Arg != ArgEnd);
    EmitCallArg(Args, *Arg, ArgTypes[I]);
    EmitNonNullArgCheck(Args.back().RV, ArgTypes[I], Arg->getExprLoc(),
                        CalleeDecl, ParamsToSkip + I);
  }
}

namespace {
struct DestroyUnpassedArg : EHScopeStack::Cleanup {
  DestroyUnpassedArg(llvm::Value *Addr, QualType Ty)
      : Addr(Addr), Ty(Ty) {}

  llvm::Value *Addr;
  QualType Ty;

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
    assert(!Dtor->isTrivial());
    CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
                              /*Delegating=*/false, Addr);
  }
};

struct DisableDebugLocationUpdates {
  CodeGenFunction &CGF;
  bool disabledDebugInfo;
  DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
    if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
      CGF.disableDebugInfo();
  }
  ~DisableDebugLocationUpdates() {
    if (disabledDebugInfo)
      CGF.enableDebugInfo();
  }
};

} // end anonymous namespace
void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
                                  QualType type) {
  DisableDebugLocationUpdates Dis(*this, E);
  if (const ObjCIndirectCopyRestoreExpr *CRE
        = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
    assert(getLangOpts().ObjCAutoRefCount);
    assert(getContext().hasSameType(E->getType(), type));
    return emitWritebackArg(*this, args, CRE);
  }

  assert(type->isReferenceType() == E->isGLValue() &&
         "reference binding to unmaterialized r-value!");

  if (E->isGLValue()) {
    assert(E->getObjectKind() == OK_Ordinary);
    return args.add(EmitReferenceBindingToExpr(E), type);
  }

  bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);

  // In the Microsoft C++ ABI, aggregate arguments are destructed by the
  // callee. However, we still have to push an EH-only cleanup in case we
  // unwind before we make it to the call.
  if (HasAggregateEvalKind &&
      CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
    // If we're using inalloca, use the argument memory. Otherwise, use a
    // temporary.
    AggValueSlot Slot;
    if (args.isUsingInAlloca())
      Slot = createPlaceholderSlot(*this, type);
    else
      Slot = CreateAggTemp(type, "agg.tmp");

    const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
    bool DestroyedInCallee =
        RD && RD->hasNonTrivialDestructor() &&
        CGM.getCXXABI().getRecordArgABI(RD) != CGCXXABI::RAA_Default;
    if (DestroyedInCallee)
      Slot.setExternallyDestructed();

    EmitAggExpr(E, Slot);
    RValue RV = Slot.asRValue();
    args.add(RV, type);

    if (DestroyedInCallee) {
      // Create a no-op GEP between the placeholder and the cleanup so we can
      // RAUW it successfully. It also serves as a marker of the first
      // instruction where the cleanup is active.
      pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddr(), type);
      // This unreachable is a temporary marker which will be removed later.
      llvm::Instruction *IsActive = Builder.CreateUnreachable();
      args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
    }
    return;
  }

  if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
      cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
    LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
    assert(L.isSimple());
    if (L.getAlignment() >= getContext().getTypeAlignInChars(type)) {
      args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true);
    } else {
      // We can't represent a misaligned lvalue in the CallArgList, so copy
      // to an aligned temporary now.
      llvm::Value *tmp = CreateMemTemp(type);
      EmitAggregateCopy(tmp, L.getAddress(), type, L.isVolatile(),
                        L.getAlignment());
      args.add(RValue::getAggregate(tmp), type);
    }
    return;
  }

  args.add(EmitAnyExprToTemp(E), type);
}
QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
  // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
  // implicitly widens null pointer constants that are arguments to varargs
  // functions to pointer-sized ints.
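  //
  // For example, in 'printf("%p", NULL)' on Win64, NULL is a plain 'int' 0;
  // widening it to the pointer-sized intptr_t keeps a callee that reads the
  // argument with va_arg(ap, void*) working.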
  if (!getTarget().getTriple().isOSWindows())
    return Arg->getType();

  if (Arg->getType()->isIntegerType() &&
      getContext().getTypeSize(Arg->getType()) <
          getContext().getTargetInfo().getPointerWidth(0) &&
      Arg->isNullPointerConstant(getContext(),
                                 Expr::NPC_ValueDependentIsNotNull)) {
    return getContext().getIntPtrType();
  }

  return Arg->getType();
}
// In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
// optimizer it can aggressively ignore unwind edges.
void
CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
  if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
      !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
    Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
                      CGM.getNoObjCARCExceptionsMetadata());
}
/// Emits a call to the given no-arguments nounwind runtime function.
llvm::CallInst *
CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
                                         const llvm::Twine &name) {
  return EmitNounwindRuntimeCall(callee, None, name);
}

/// Emits a call to the given nounwind runtime function.
llvm::CallInst *
CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
                                         ArrayRef<llvm::Value*> args,
                                         const llvm::Twine &name) {
  llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
  call->setDoesNotThrow();
  return call;
}

/// Emits a simple call (never an invoke) to the given no-arguments
/// runtime function.
llvm::CallInst *
CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
                                 const llvm::Twine &name) {
  return EmitRuntimeCall(callee, None, name);
}

/// Emits a simple call (never an invoke) to the given runtime
/// function.
llvm::CallInst *
CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
                                 ArrayRef<llvm::Value*> args,
                                 const llvm::Twine &name) {
  llvm::CallInst *call = Builder.CreateCall(callee, args, name);
  call->setCallingConv(getRuntimeCC());
  return call;
}
/// Emits a call or invoke to the given noreturn runtime function.
void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
                                               ArrayRef<llvm::Value*> args) {
  if (getInvokeDest()) {
    llvm::InvokeInst *invoke =
        Builder.CreateInvoke(callee,
                             getUnreachableBlock(),
                             getInvokeDest(),
                             args);
    invoke->setDoesNotReturn();
    invoke->setCallingConv(getRuntimeCC());
  } else {
    llvm::CallInst *call = Builder.CreateCall(callee, args);
    call->setDoesNotReturn();
    call->setCallingConv(getRuntimeCC());
    Builder.CreateUnreachable();
  }
}

/// Emits a call or invoke instruction to the given nullary runtime
/// function.
llvm::CallSite
CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
                                         const Twine &name) {
  return EmitRuntimeCallOrInvoke(callee, None, name);
}

/// Emits a call or invoke instruction to the given runtime function.
llvm::CallSite
CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
                                         ArrayRef<llvm::Value*> args,
                                         const Twine &name) {
  llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name);
  callSite.setCallingConv(getRuntimeCC());
  return callSite;
}

llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
                                  const Twine &Name) {
  return EmitCallOrInvoke(Callee, None, Name);
}

/// Emits a call or invoke instruction to the given function, depending
/// on the current state of the EH stack.
llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
                                  ArrayRef<llvm::Value *> Args,
                                  const Twine &Name) {
  llvm::BasicBlock *InvokeDest = getInvokeDest();

  llvm::Instruction *Inst;
  if (!InvokeDest)
    Inst = Builder.CreateCall(Callee, Args, Name);
  else {
    llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
    Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, Name);
    EmitBlock(ContBB);
  }

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(Inst);

  return llvm::CallSite(Inst);
}
/// \brief Store a non-aggregate value to an address to initialize it. For
/// initialization, a non-atomic store will be used.
static void EmitInitStoreOfNonAggregate(CodeGenFunction &CGF, RValue Src,
                                        LValue Dst) {
  if (Src.isScalar())
    CGF.EmitStoreOfScalar(Src.getScalarVal(), Dst, /*init=*/true);
  else
    CGF.EmitStoreOfComplex(Src.getComplexVal(), Dst, /*init=*/true);
}

void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
                                                  llvm::Value *New) {
  DeferredReplacements.push_back(std::make_pair(Old, New));
}
RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 llvm::Value *Callee,
                                 ReturnValueSlot ReturnValue,
                                 const CallArgList &CallArgs,
                                 const Decl *TargetDecl,
                                 llvm::Instruction **callOrInvoke) {
  // FIXME: We no longer need the types from CallArgs; lift up and simplify.

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();

  llvm::FunctionType *IRFuncTy =
      cast<llvm::FunctionType>(
          cast<llvm::PointerType>(Callee->getType())->getElementType());

  // If we're using inalloca, insert the allocation after the stack save.
  // FIXME: Do this earlier rather than hacking it in here!
  llvm::AllocaInst *ArgMemory = nullptr;
  if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
    llvm::Instruction *IP = CallArgs.getStackBase();
    llvm::AllocaInst *AI;
    if (IP) {
      IP = IP->getNextNode();
      AI = new llvm::AllocaInst(ArgStruct, "argmem", IP);
    } else {
      AI = CreateTempAlloca(ArgStruct, "argmem");
    }
    AI->setUsedWithInAlloca(true);
    assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
    ArgMemory = AI;
  }
  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
  SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());

  // If the call returns a temporary with struct return, create a temporary
  // alloca to hold the result, unless one is given to us.
  llvm::Value *SRetPtr = nullptr;
  size_t UnusedReturnSize = 0;
  if (RetAI.isIndirect() || RetAI.isInAlloca()) {
    SRetPtr = ReturnValue.getValue();
    if (!SRetPtr)
      SRetPtr = CreateMemTemp(RetTy);
    if (HaveInsertPoint() && ReturnValue.isUnused()) {
      uint64_t size =
          CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
      if (EmitLifetimeStart(size, SRetPtr))
        UnusedReturnSize = size;
    }
    if (IRFunctionArgs.hasSRetArg()) {
      IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr;
    } else {
      llvm::Value *Addr =
          Builder.CreateStructGEP(ArgMemory->getAllocatedType(), ArgMemory,
                                  RetAI.getInAllocaFieldIndex());
      Builder.CreateStore(SRetPtr, Addr);
    }
  }

  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  unsigned ArgNo = 0;
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it, ++ArgNo) {
    const ABIArgInfo &ArgInfo = info_it->info;
    RValue RV = I->RV;

    CharUnits TypeAlign = getContext().getTypeAlignInChars(I->Ty);

    // Insert a padding argument to ensure proper alignment.
    if (IRFunctionArgs.hasPaddingArg(ArgNo))
      IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
          llvm::UndefValue::get(ArgInfo.getPaddingType());

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::InAlloca: {
      assert(NumIRArgs == 0);
      assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
      if (RV.isAggregate()) {
        // Replace the placeholder with the appropriate argument slot GEP.
        llvm::Instruction *Placeholder =
            cast<llvm::Instruction>(RV.getAggregateAddr());
        CGBuilderTy::InsertPoint IP = Builder.saveIP();
        Builder.SetInsertPoint(Placeholder);
        llvm::Value *Addr =
            Builder.CreateStructGEP(ArgMemory->getAllocatedType(), ArgMemory,
                                    ArgInfo.getInAllocaFieldIndex());
        Builder.restoreIP(IP);
        deferPlaceholderReplacement(Placeholder, Addr);
      } else {
        // Store the RValue into the argument struct.
        llvm::Value *Addr =
            Builder.CreateStructGEP(ArgMemory->getAllocatedType(), ArgMemory,
                                    ArgInfo.getInAllocaFieldIndex());
        unsigned AS = Addr->getType()->getPointerAddressSpace();
        llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
        // There are some cases where a trivial bitcast is not avoidable. The
        // definition of a type later in a translation unit may change its type
        // from {}* to (%struct.foo*)*.
        if (Addr->getType() != MemType)
          Addr = Builder.CreateBitCast(Addr, MemType);
        LValue argLV = MakeAddrLValue(Addr, I->Ty, TypeAlign);
        EmitInitStoreOfNonAggregate(*this, RV, argLV);
      }
      break;
    }
3220 case ABIArgInfo::Indirect: {
3221 assert(NumIRArgs == 1);
3222 if (RV.isScalar() || RV.isComplex()) {
3223 // Make a temporary alloca to pass the argument.
3224 llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
3225 if (ArgInfo.getIndirectAlign() > AI->getAlignment())
3226 AI->setAlignment(ArgInfo.getIndirectAlign());
3227 IRCallArgs[FirstIRArg] = AI;
3229 LValue argLV = MakeAddrLValue(AI, I->Ty, TypeAlign);
3230 EmitInitStoreOfNonAggregate(*this, RV, argLV);
3232 // We want to avoid creating an unnecessary temporary+copy here;
3233 // however, we need one in three cases:
3234 // 1. If the argument is not byval, and we are required to copy the
3235 // source. (This case doesn't occur on any common architecture.)
3236 // 2. If the argument is byval, RV is not sufficiently aligned, and
3237 // we cannot force it to be sufficiently aligned.
3238 // 3. If the argument is byval, but RV is located in an address space
3239 // different than that of the argument (0).
        llvm::Value *Addr = RV.getAggregateAddr();
        unsigned Align = ArgInfo.getIndirectAlign();
        const llvm::DataLayout *TD = &CGM.getDataLayout();
        const unsigned RVAddrSpace = Addr->getType()->getPointerAddressSpace();
        const unsigned ArgAddrSpace =
            (FirstIRArg < IRFuncTy->getNumParams()
                 ? IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace()
                 : 0);
        if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
            (ArgInfo.getIndirectByVal() && TypeAlign.getQuantity() < Align &&
             llvm::getOrEnforceKnownAlignment(Addr, Align, *TD) < Align) ||
            (ArgInfo.getIndirectByVal() && (RVAddrSpace != ArgAddrSpace))) {
          // Create an aligned temporary, and copy to it.
          llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
          if (Align > AI->getAlignment())
            AI->setAlignment(Align);
          IRCallArgs[FirstIRArg] = AI;
          EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());
        } else {
          // Skip the extra memcpy call.
          IRCallArgs[FirstIRArg] = Addr;
        }
      }
      break;
    }

    case ABIArgInfo::Ignore:
      assert(NumIRArgs == 0);
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
          ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
          ArgInfo.getDirectOffset() == 0) {
        assert(NumIRArgs == 1);
        llvm::Value *V;
        if (RV.isScalar())
          V = RV.getScalarVal();
        else
          V = Builder.CreateLoad(RV.getAggregateAddr());

        // We might have to widen integers, but we should never truncate.
        if (ArgInfo.getCoerceToType() != V->getType() &&
            V->getType()->isIntegerTy())
          V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());
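        // For illustration (hypothetical): an i1 scalar bound to an i8 ABI
        // slot becomes "%widened = zext i1 %v to i8" here.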

        // If the argument doesn't match, perform a bitcast to coerce it. This
        // can happen due to trivial type mismatches.
        if (FirstIRArg < IRFuncTy->getNumParams() &&
            V->getType() != IRFuncTy->getParamType(FirstIRArg))
          V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
        IRCallArgs[FirstIRArg] = V;
        break;
      }

      // FIXME: Avoid the conversion through memory if possible.
      llvm::Value *SrcPtr;
      CharUnits SrcAlign;
      if (RV.isScalar() || RV.isComplex()) {
        SrcPtr = CreateMemTemp(I->Ty, "coerce");
        SrcAlign = TypeAlign;
        LValue SrcLV = MakeAddrLValue(SrcPtr, I->Ty, TypeAlign);
        EmitInitStoreOfNonAggregate(*this, RV, SrcLV);
      } else {
        SrcPtr = RV.getAggregateAddr();
        // This alignment is guaranteed by EmitCallArg.
        SrcAlign = TypeAlign;
      }

      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = ArgInfo.getDirectOffset()) {
        SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
        SrcPtr = Builder.CreateConstGEP1_32(Builder.getInt8Ty(), SrcPtr, Offs);
        SrcPtr = Builder.CreateBitCast(SrcPtr,
                     llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
        SrcAlign = SrcAlign.alignmentAtOffset(CharUnits::fromQuantity(Offs));
      }

      // Fast-isel and the optimizer generally like scalar values better than
      // FCAs, so we flatten them if this is safe to do for this argument.
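      // For illustration (hypothetical IR): with a coerce-to type of
      // { i64, i64 }, the aggregate travels as two scalar i64 arguments
      // rather than one first-class aggregate { i64, i64 } value.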
      llvm::StructType *STy =
          dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
      if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
        llvm::Type *SrcTy =
            cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
        uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
        uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);

        // If the source type is smaller than the destination type of the
        // coerce-to logic, copy the source value into a temp alloca the size
        // of the destination type to allow loading all of it. The bits past
        // the source value are left undef.
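        // For illustration (hypothetical): a 12-byte %struct.S coerced to
        // { i64, i64 } (16 bytes) is memcpy'd into a 16-byte temporary, and
        // the trailing 4 bytes simply load as undef.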
        if (SrcSize < DstSize) {
          llvm::AllocaInst *TempAlloca
            = CreateTempAlloca(STy, SrcPtr->getName() + ".coerce");
          Builder.CreateMemCpy(TempAlloca, SrcPtr, SrcSize, 0);
          SrcPtr = TempAlloca;
        } else {
          SrcPtr = Builder.CreateBitCast(SrcPtr,
                                         llvm::PointerType::getUnqual(STy));
        }

        assert(NumIRArgs == STy->getNumElements());
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          llvm::Value *EltPtr = Builder.CreateConstGEP2_32(STy, SrcPtr, 0, i);
          llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
          // We don't know what we're loading from.
          LI->setAlignment(1);
          IRCallArgs[FirstIRArg + i] = LI;
        }
      } else {
        // In the simple case, just pass the coerced loaded value.
        assert(NumIRArgs == 1);
        IRCallArgs[FirstIRArg] =
            CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(),
                              SrcAlign, *this);
      }

      break;
    }

    case ABIArgInfo::Expand:
      unsigned IRArgPos = FirstIRArg;
      ExpandTypeToArgs(I->Ty, RV, IRFuncTy, IRCallArgs, IRArgPos);
      assert(IRArgPos == FirstIRArg + NumIRArgs);
      break;
    }
  }

  if (ArgMemory) {
    llvm::Value *Arg = ArgMemory;
    if (CallInfo.isVariadic()) {
      // When passing non-POD arguments by value to variadic functions, we
      // will end up with a variadic prototype and an inalloca call site. In
      // such cases, we can't do any parameter mismatch checks. Give up and
      // bitcast the callee.
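      // For illustration (hypothetical, i686 Windows): a call to
      // "void f(NonTrivial nt, ...)" lowers to a varargs prototype whose
      // fixed and variadic arguments all live in one inalloca struct, so the
      // per-parameter type checks in the else branch would not line up.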
      unsigned CalleeAS =
          cast<llvm::PointerType>(Callee->getType())->getAddressSpace();
      Callee = Builder.CreateBitCast(
          Callee, getTypes().GetFunctionType(CallInfo)->getPointerTo(CalleeAS));
    } else {
      llvm::Type *LastParamTy =
          IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
      if (Arg->getType() != LastParamTy) {
#ifndef NDEBUG
        // Assert that these structs have equivalent element types.
        llvm::StructType *FullTy = CallInfo.getArgStruct();
        llvm::StructType *DeclaredTy = cast<llvm::StructType>(
            cast<llvm::PointerType>(LastParamTy)->getElementType());
        assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
        for (llvm::StructType::element_iterator
                 DI = DeclaredTy->element_begin(),
                 DE = DeclaredTy->element_end(), FI = FullTy->element_begin();
             DI != DE; ++DI, ++FI)
          assert(*DI == *FI);
#endif
        Arg = Builder.CreateBitCast(Arg, LastParamTy);
      }
    }
    assert(IRFunctionArgs.hasInallocaArg());
    IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
  }

  if (!CallArgs.getCleanupsToDeactivate().empty())
    deactivateArgCleanupsBeforeCall(*this, CallArgs);

  // If the callee is a bitcast of a function to a varargs pointer-to-function
  // type, check to see if we can remove the bitcast. This handles some cases
  // with unprototyped functions.
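  // For illustration (hypothetical C): given the K&R-style declaration
  // "void f();", a call "f(42);" goes through a constant expression like
  //   bitcast (void (...)* @f to void (i32)*)
  // and when the underlying types line up we can call @f directly.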
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
    if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
      llvm::PointerType *CurPT = cast<llvm::PointerType>(Callee->getType());
      llvm::FunctionType *CurFT =
          cast<llvm::FunctionType>(CurPT->getElementType());
      llvm::FunctionType *ActualFT = CalleeF->getFunctionType();

      if (CE->getOpcode() == llvm::Instruction::BitCast &&
          ActualFT->getReturnType() == CurFT->getReturnType() &&
          ActualFT->getNumParams() == CurFT->getNumParams() &&
          ActualFT->getNumParams() == IRCallArgs.size() &&
          (CurFT->isVarArg() || !ActualFT->isVarArg())) {
        bool ArgsMatch = true;
        for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
          if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
            ArgsMatch = false;
            break;
          }

        // Strip the cast if we can get away with it. This is a nice cleanup,
        // but also allows us to inline the function at -O0 if it is marked
        // always_inline.
        if (ArgsMatch)
          Callee = CalleeF;
      }
    }

  assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
  for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
    // The inalloca argument can have a different type.
    if (IRFunctionArgs.hasInallocaArg() &&
        i == IRFunctionArgs.getInallocaArgNo())
      continue;
    if (i < IRFuncTy->getNumParams())
      assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
  }

  unsigned CallingConv;
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList,
                             CallingConv, /*AttrOnCallSite=*/true);
  llvm::AttributeSet Attrs = llvm::AttributeSet::get(getLLVMContext(),
                                                     AttributeList);

  llvm::BasicBlock *InvokeDest = nullptr;
  if (!Attrs.hasAttribute(llvm::AttributeSet::FunctionIndex,
                          llvm::Attribute::NoUnwind) ||
      currentFunctionUsesSEHTry())
    InvokeDest = getInvokeDest();
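  // For illustration: a nounwind callee outside any SEH __try produces a
  // plain "call"; otherwise we emit "invoke ... to label %invoke.cont unwind
  // label %..." so cleanups and handlers run on the exceptional edge.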

  llvm::CallSite CS;
  if (!InvokeDest) {
    CS = Builder.CreateCall(Callee, IRCallArgs);
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, IRCallArgs);
    EmitBlock(Cont);
  }
  if (callOrInvoke)
    *callOrInvoke = CS.getInstruction();

  if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
      !CS.hasFnAttr(llvm::Attribute::NoInline))
    Attrs =
        Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
                           llvm::Attribute::AlwaysInline);

  // Disable inlining inside SEH __try blocks.
  if (isSEHTryScope())
    Attrs =
        Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
                           llvm::Attribute::NoInline);

  CS.setAttributes(Attrs);
  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(CS.getInstruction());

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRgen to discard
  // unreachable code.
  if (CS.doesNotReturn()) {
    if (UnusedReturnSize)
      EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize),
                      SRetPtr);

    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters
    // generally are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  llvm::Instruction *CI = CS.getInstruction();
  if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
    CI->setName("call");

  // Emit any writebacks immediately. Arguably this should happen
  // after any return-value munging.
  if (CallArgs.hasWritebacks())
    emitWritebacks(*this, CallArgs);

  // The stack cleanup for inalloca arguments has to run out of the normal
  // lexical order, so deactivate it and run it manually here.
  CallArgs.freeArgumentMemory(*this);

  RValue Ret = [&] {
    switch (RetAI.getKind()) {
    case ABIArgInfo::InAlloca:
    case ABIArgInfo::Indirect: {
      RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
      if (UnusedReturnSize)
        EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize),
                        SRetPtr);
      return ret;
    }

    case ABIArgInfo::Ignore:
      // If we are ignoring an argument that had a result, make sure to
      // construct the appropriate return value for our caller.
      return GetUndefRValue(RetTy);

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      llvm::Type *RetIRTy = ConvertType(RetTy);
      if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
        switch (getEvaluationKind(RetTy)) {
        case TEK_Complex: {
          llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
          llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
          return RValue::getComplex(std::make_pair(Real, Imag));
        }
        case TEK_Aggregate: {
          llvm::Value *DestPtr = ReturnValue.getValue();
          bool DestIsVolatile = ReturnValue.isVolatile();
          CharUnits DestAlign = getContext().getTypeAlignInChars(RetTy);

          if (!DestPtr) {
            DestPtr = CreateMemTemp(RetTy, "agg.tmp");
            DestIsVolatile = false;
          }
          BuildAggStore(*this, CI, DestPtr, DestIsVolatile, DestAlign);
          return RValue::getAggregate(DestPtr);
        }
        case TEK_Scalar: {
          // If the return value doesn't match, perform a bitcast to coerce
          // it. This can happen due to trivial type mismatches.
          llvm::Value *V = CI;
          if (V->getType() != RetIRTy)
            V = Builder.CreateBitCast(V, RetIRTy);
          return RValue::get(V);
        }
        }
        llvm_unreachable("bad evaluation kind");
      }

      llvm::Value *DestPtr = ReturnValue.getValue();
      bool DestIsVolatile = ReturnValue.isVolatile();
      CharUnits DestAlign = getContext().getTypeAlignInChars(RetTy);

      if (!DestPtr) {
        DestPtr = CreateMemTemp(RetTy, "coerce");
        DestIsVolatile = false;
      }

      // If the value is offset in memory, apply the offset now.
      llvm::Value *StorePtr = DestPtr;
      CharUnits StoreAlign = DestAlign;
      if (unsigned Offs = RetAI.getDirectOffset()) {
        StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
        StorePtr =
            Builder.CreateConstGEP1_32(Builder.getInt8Ty(), StorePtr, Offs);
        StorePtr = Builder.CreateBitCast(StorePtr,
            llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
        StoreAlign =
            StoreAlign.alignmentAtOffset(CharUnits::fromQuantity(Offs));
      }
      CreateCoercedStore(CI, StorePtr, DestIsVolatile, StoreAlign, *this);

      return convertTempToRValue(DestPtr, RetTy, SourceLocation());
    }

    case ABIArgInfo::Expand:
      llvm_unreachable("Invalid ABI kind for return argument");
    }

    llvm_unreachable("Unhandled ABIArgInfo::Kind");
  } ();

  if (Ret.isScalar() && TargetDecl) {
    if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
      llvm::Value *OffsetValue = nullptr;
      if (const auto *Offset = AA->getOffset())
        OffsetValue = EmitScalarExpr(Offset);

      llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment());
      llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment);
      EmitAlignmentAssumption(Ret.getScalarVal(), AlignmentCI->getZExtValue(),
                              OffsetValue);
    }
  }

  return Ret;
}
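// For illustration of the AssumeAlignedAttr handling above (hypothetical C):
// a declaration such as
//   void *my_alloc(unsigned n) __attribute__((assume_aligned(64)));
// makes each call result pass through that block, which emits an
// llvm.assume-based alignment assumption on the returned pointer.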

/* VarArg handling */

llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
  return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
}
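// For illustration: a C "va_arg(ap, int)" lands in EmitVAArg above, and the
// target's ABIInfo decides how to read and advance the va_list: a simple
// pointer bump on some targets, register-save-area logic on others (e.g.
// x86-64 System V).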