//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "ABIInfo.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;
static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_X86_64Win64: return llvm::CallingConv::X86_64_Win64;
  case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
  // TODO: Add support for __pascal to LLVM.
  case CC_X86Pascal: return llvm::CallingConv::C;
  // TODO: Add support for __vectorcall to LLVM.
  case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
  case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
  case CC_SpirKernel: return llvm::CallingConv::SPIR_KERNEL;
  }
}
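// For example, a declaration such as
//   void f(void) __attribute__((stdcall));
// is lowered on x86 with llvm::CallingConv::X86_StdCall (printed as
// x86_stdcallcc in the emitted IR), while __pascal deliberately falls back
// to the plain C convention until LLVM grows support for it.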
/// Derives the 'this' type for codegen purposes, i.e. ignoring method
/// qualification.
/// FIXME: address space qualification?
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type. Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
  return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
                                 /*instanceMethod=*/false,
                                 /*chainCall=*/false, None,
                                 FTNP->getExtInfo(), RequiredArgs(0));
}
/// Adds the formal parameters in FPT to the given prefix. If any parameter in
/// FPT has pass_object_size attrs, then we'll add parameters for those, too.
static void appendParameterTypes(const CodeGenTypes &CGT,
                                 SmallVectorImpl<CanQualType> &prefix,
                                 const CanQual<FunctionProtoType> &FPT,
                                 const FunctionDecl *FD) {
  // Fast path: unknown target.
  if (FD == nullptr) {
    prefix.append(FPT->param_type_begin(), FPT->param_type_end());
    return;
  }

  // In the vast majority of cases, we'll have precisely FPT->getNumParams()
  // parameters; the only thing that can change this is the presence of
  // pass_object_size. So, we preallocate for the common case.
  prefix.reserve(prefix.size() + FPT->getNumParams());

  assert(FD->getNumParams() == FPT->getNumParams());
  for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
    prefix.push_back(FPT->getParamType(I));
    if (FD->getParamDecl(I)->hasAttr<PassObjectSizeAttr>())
      prefix.push_back(CGT.getContext().getSizeType());
  }
}
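// Illustrative effect of the loop above: a declaration like
//   void fill(char *buf __attribute__((pass_object_size(0))));
// is arranged as if it were written
//   void fill(char *buf, size_t buf_size);
// with one implicit size_t parameter appended per pass_object_size argument.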
/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored.
static const CGFunctionInfo &
arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
                        SmallVectorImpl<CanQualType> &prefix,
                        CanQual<FunctionProtoType> FTP,
                        const FunctionDecl *FD) {
  RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
  // FIXME: Kill copy.
  appendParameterTypes(CGT, prefix, FTP, FD);
  CanQualType resultType = FTP->getReturnType().getUnqualifiedType();
  return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
                                     /*chainCall=*/false, prefix,
                                     FTP->getExtInfo(), required);
}

/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP,
                                      const FunctionDecl *FD) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
                                   FTP, FD);
}
static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<VectorCallAttr>())
    return CC_X86VectorCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  if (D->hasAttr<IntelOclBiccAttr>())
    return CC_IntelOclBicc;

  if (D->hasAttr<MSABIAttr>())
    return IsWindows ? CC_C : CC_X86_64Win64;

  if (D->hasAttr<SysVABIAttr>())
    return IsWindows ? CC_X86_64SysV : CC_C;

  return CC_C;
}
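// For example, on a non-Windows x86-64 target
//   void f(void) __attribute__((ms_abi));
// yields CC_X86_64Win64 here, while on Windows the attribute is redundant
// and maps back to CC_C; SysVABIAttr mirrors this in the other direction.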
/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// (A null RD means we don't have any meaningful "this" argument type,
/// so fall back to a generic pointer type.)
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP,
                                   const CXXMethodDecl *MD) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  if (RD)
    argTypes.push_back(GetThisType(Context, RD));
  else
    argTypes.push_back(Context.VoidPtrTy);

  return ::arrangeLLVMFunctionInfo(
      *this, /*instanceMethod=*/true, argTypes,
      FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>(), MD);
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function. The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQual<FunctionProtoType> prototype = GetFormalType(MD);

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
    return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
  }

  return arrangeFreeFunctionType(prototype, MD);
}
const CGFunctionInfo &
CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
                                            StructorType Type) {

  SmallVector<CanQualType, 16> argTypes;
  argTypes.push_back(GetThisType(Context, MD->getParent()));

  GlobalDecl GD;
  if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
    GD = GlobalDecl(CD, toCXXCtorType(Type));
  } else {
    auto *DD = dyn_cast<CXXDestructorDecl>(MD);
    GD = GlobalDecl(DD, toCXXDtorType(Type));
  }

  CanQual<FunctionProtoType> FTP = GetFormalType(MD);

  // Add the formal parameters.
  appendParameterTypes(*this, argTypes, FTP, MD);

  TheCXXABI.buildStructorSignature(MD, Type, argTypes);

  RequiredArgs required =
      (MD->isVariadic() ? RequiredArgs(argTypes.size()) : RequiredArgs::All);

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  CanQualType resultType = TheCXXABI.HasThisReturn(GD)
                               ? argTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;
  return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, argTypes, extInfo,
                                 required);
}
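// Note on the result type computed above: in ABIs where certain structors
// return 'this' (HasThisReturn, e.g. constructors in the Microsoft C++ ABI
// and constructors/destructors under ARM's ABI), the arranged return type
// is the 'this' pointer type (argTypes.front()) instead of void.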
/// Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
                                        const CXXConstructorDecl *D,
                                        CXXCtorType CtorKind,
                                        unsigned ExtraArgs) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> ArgTypes;
  for (const auto &Arg : args)
    ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));

  CanQual<FunctionProtoType> FPT = GetFormalType(D);
  RequiredArgs Required = RequiredArgs::forPrototypePlus(FPT, 1 + ExtraArgs);
  GlobalDecl GD(D, CtorKind);
  CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
                               ? ArgTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;

  FunctionType::ExtInfo Info = FPT->getExtInfo();
  return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTypes, Info,
                                 Required);
}
/// Arrange the argument and result information for the declaration or
/// definition of the given function.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));

  // When declaring a function without a prototype, always use a
  // non-variadic type.
  if (isa<FunctionNoProtoType>(FTy)) {
    CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
    return arrangeLLVMFunctionInfo(
        noProto->getReturnType(), /*instanceMethod=*/false,
        /*chainCall=*/false, None, noProto->getExtInfo(), RequiredArgs::All);
  }

  assert(isa<FunctionProtoType>(FTy));
  return arrangeFreeFunctionType(FTy.getAs<FunctionProtoType>(), FD);
}
/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}

/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type. The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (const auto *I : MD->params()) {
    argTys.push_back(Context.getCanonicalParamType(I->getType()));
  }

  FunctionType::ExtInfo einfo;
  bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
      (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeLLVMFunctionInfo(
      GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
      /*chainCall=*/false, argTys, einfo, required);
}
const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return arrangeCXXStructorDeclaration(CD, getFromCtorType(GD.getCtorType()));

  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return arrangeCXXStructorDeclaration(DD, getFromDtorType(GD.getDtorType()));

  return arrangeFunctionDeclaration(FD);
}
/// Arrange a thunk that takes 'this' as the first parameter followed by
/// varargs. Return a void pointer, regardless of the actual return type.
/// The body of the thunk will end in a musttail call to a function of the
/// correct type, and the caller will bitcast the function to the correct
/// prototype.
const CGFunctionInfo &
CodeGenTypes::arrangeMSMemberPointerThunk(const CXXMethodDecl *MD) {
  assert(MD->isVirtual() && "only virtual memptrs have thunks");
  CanQual<FunctionProtoType> FTP = GetFormalType(MD);
  CanQualType ArgTys[] = { GetThisType(Context, MD->getParent()) };
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
                                 /*chainCall=*/false, ArgTys,
                                 FTP->getExtInfo(), RequiredArgs(1));
}
const CGFunctionInfo &
CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
                                   CXXCtorType CT) {
  assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);

  CanQual<FunctionProtoType> FTP = GetFormalType(CD);
  SmallVector<CanQualType, 2> ArgTys;
  const CXXRecordDecl *RD = CD->getParent();
  ArgTys.push_back(GetThisType(Context, RD));
  if (CT == Ctor_CopyingClosure)
    ArgTys.push_back(*FTP->param_type_begin());
  if (RD->getNumVBases() > 0)
    ArgTys.push_back(Context.IntTy);
  CallingConv CC = Context.getDefaultCallingConvention(
      /*IsVariadic=*/false, /*IsCXXMethod=*/true);
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTys,
                                 FunctionType::ExtInfo(CC), RequiredArgs::All);
}
/// Arrange a call as unto a free function, except possibly with an
/// additional number of formal parameters considered required.
static const CGFunctionInfo &
arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
                            CodeGenModule &CGM,
                            const CallArgList &args,
                            const FunctionType *fnType,
                            unsigned numExtraRequiredArgs,
                            bool chainCall) {
  assert(args.size() >= numExtraRequiredArgs);

  // In most cases, there are no optional arguments.
  RequiredArgs required = RequiredArgs::All;

  // If we have a variadic prototype, the required arguments are the
  // extra prefix plus the arguments in the prototype.
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs(proto->getNumParams() + numExtraRequiredArgs);

  // If we don't have a prototype at all, but we're supposed to
  // explicitly use the variadic convention for unprototyped calls,
  // treat all of the arguments as required but preserve the nominal
  // possibility of variadics.
  } else if (CGM.getTargetCodeGenInfo()
                 .isNoProtoCallVariadic(args,
                                        cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(args.size());
  }

  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &arg : args)
    argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
  return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
                                     /*instanceMethod=*/false, chainCall,
                                     argTypes, fnType->getExtInfo(), required);
}
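// Sketch of the two special cases above: a call to
//   int printf(const char *fmt, ...);
// gets required == 1 (the prototype's fixed parameters), so later arguments
// are lowered as variadic; a call to an unprototyped K&R function normally
// treats every supplied argument as required, unless the target classifies
// such calls as variadic via isNoProtoCallVariadic.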
/// Figure out the rules for calling a function with the given formal
/// type using the given arguments. The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      const FunctionType *fnType,
                                      bool chainCall) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
                                     chainCall ? 1 : 0, chainCall);
}

/// A block function call is essentially a free-function call with an
/// extra implicit argument.
const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
                                       const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
                                     /*chainCall=*/false);
}

const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(QualType resultType,
                                      const CallArgList &args,
                                      FunctionType::ExtInfo info,
                                      RequiredArgs required) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, info, required);
}
/// Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
                                   const FunctionProtoType *FPT,
                                   RequiredArgs required) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));

  FunctionType::ExtInfo info = FPT->getExtInfo();
  return arrangeLLVMFunctionInfo(
      GetReturnType(FPT->getReturnType()), /*instanceMethod=*/true,
      /*chainCall=*/false, argTypes, info, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeFreeFunctionDeclaration(
    QualType resultType, const FunctionArgList &args,
    const FunctionType::ExtInfo &info, bool isVariadic) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (auto Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg->getType()));

  RequiredArgs required =
      (isVariadic ? RequiredArgs(args.size()) : RequiredArgs::All);
  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, info, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeLLVMFunctionInfo(
      getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
      None, FunctionType::ExtInfo(), RequiredArgs::All);
}
/// Arrange the argument and result information for an abstract value
/// of a given function type. This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &
CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
                                      bool instanceMethod,
                                      bool chainCall,
                                      ArrayRef<CanQualType> argTypes,
                                      FunctionType::ExtInfo info,
                                      RequiredArgs required) {
  assert(std::all_of(argTypes.begin(), argTypes.end(),
                     std::mem_fun_ref(&CanQualType::isCanonicalAsParam)));

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, required,
                          resultType, argTypes);

  void *insertPos = nullptr;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  // Construct the function info. We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
                              resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI).second;
  (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  getABIInfo().computeInfo(*FI);

  // Loop over all of the computed argument and return value info. If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (auto &I : FI->arguments())
    if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
      I.info.setCoerceToType(ConvertType(I.type));

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}
CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
                                       bool instanceMethod,
                                       bool chainCall,
                                       const FunctionType::ExtInfo &info,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  void *buffer = operator new(sizeof(CGFunctionInfo) +
                              sizeof(ArgInfo) * (argTypes.size() + 1));
  CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->InstanceMethod = instanceMethod;
  FI->ChainCall = chainCall;
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->ArgStruct = nullptr;
  FI->ArgStructAlign = 0;
  FI->NumArgs = argTypes.size();
  FI->getArgsBuffer()[0].type = resultType;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  return FI;
}
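// Layout note for the allocation above: the CGFunctionInfo and its ArgInfo
// trailing array share one buffer. Slot 0 of getArgsBuffer() holds the
// return type/info and slots 1..NumArgs hold the parameters, hence the
// "argTypes.size() + 1" in the allocation size.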
namespace {

// ABIArgInfo::Expand implementation.

// Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
struct TypeExpansion {
  enum TypeExpansionKind {
    // Elements of constant arrays are expanded recursively.
    TEK_ConstantArray,
    // Record fields are expanded recursively (but if record is a union, only
    // the field with the largest size is expanded).
    TEK_Record,
    // For complex types, real and imaginary parts are expanded recursively.
    TEK_Complex,
    // All other types are not expandable.
    TEK_None
  };

  const TypeExpansionKind Kind;

  TypeExpansion(TypeExpansionKind K) : Kind(K) {}
  virtual ~TypeExpansion() {}
};

struct ConstantArrayExpansion : TypeExpansion {
  QualType EltTy;
  uint64_t NumElts;

  ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
      : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_ConstantArray;
  }
};

struct RecordExpansion : TypeExpansion {
  SmallVector<const CXXBaseSpecifier *, 1> Bases;

  SmallVector<const FieldDecl *, 1> Fields;

  RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
                  SmallVector<const FieldDecl *, 1> &&Fields)
      : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
        Fields(std::move(Fields)) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Record;
  }
};

struct ComplexExpansion : TypeExpansion {
  QualType EltTy;

  ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Complex;
  }
};

struct NoExpansion : TypeExpansion {
  NoExpansion() : TypeExpansion(TEK_None) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_None;
  }
};
}  // namespace
static std::unique_ptr<TypeExpansion>
getTypeExpansion(QualType Ty, const ASTContext &Context) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    return llvm::make_unique<ConstantArrayExpansion>(
        AT->getElementType(), AT->getSize().getZExtValue());
  }
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    SmallVector<const CXXBaseSpecifier *, 1> Bases;
    SmallVector<const FieldDecl *, 1> Fields;
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases - all the fields are the
      // same after flattening. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = nullptr;
      CharUnits UnionSize = CharUnits::Zero();

      for (const auto *FD : RD->fields()) {
        // Skip zero length bitfields.
        if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        Fields.push_back(LargestFD);
    } else {
      if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        assert(!CXXRD->isDynamicClass() &&
               "cannot expand vtable pointers in dynamic classes");
        for (const CXXBaseSpecifier &BS : CXXRD->bases())
          Bases.push_back(&BS);
      }

      for (const auto *FD : RD->fields()) {
        // Skip zero length bitfields.
        if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        Fields.push_back(FD);
      }
    }
    return llvm::make_unique<RecordExpansion>(std::move(Bases),
                                              std::move(Fields));
  }
  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    return llvm::make_unique<ComplexExpansion>(CT->getElementType());
  }
  return llvm::make_unique<NoExpansion>();
}
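// Worked example (illustrative): given
//   struct Pair { int x; int y; };
//   struct S { Pair p; float f[2]; };
// getTypeExpansion(S) is a RecordExpansion over {p, f}, and recursive
// expansion flattens an Expand-classified S into the IR parameter sequence
// { i32, i32, float, float }.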
static int getExpansionSize(QualType Ty, const ASTContext &Context) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
  }
  if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    int Res = 0;
    for (auto BS : RExp->Bases)
      Res += getExpansionSize(BS->getType(), Context);
    for (auto FD : RExp->Fields)
      Res += getExpansionSize(FD->getType(), Context);
    return Res;
  }
  if (isa<ComplexExpansion>(Exp.get()))
    return 2;
  assert(isa<NoExpansion>(Exp.get()));
  return 1;
}
void
CodeGenTypes::getExpandedTypes(QualType Ty,
                               SmallVectorImpl<llvm::Type *>::iterator &TI) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {
      getExpandedTypes(CAExp->EltTy, TI);
    }
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    for (auto BS : RExp->Bases)
      getExpandedTypes(BS->getType(), TI);
    for (auto FD : RExp->Fields)
      getExpandedTypes(FD->getType(), TI);
  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
    llvm::Type *EltTy = ConvertType(CExp->EltTy);
    *TI++ = EltTy;
    *TI++ = EltTy;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    *TI++ = ConvertType(Ty);
  }
}
static void forConstantArrayExpansion(CodeGenFunction &CGF,
                                      ConstantArrayExpansion *CAE,
                                      Address BaseAddr,
                                      llvm::function_ref<void(Address)> Fn) {
  CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
  CharUnits EltAlign =
      BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);

  for (int i = 0, n = CAE->NumElts; i < n; i++) {
    llvm::Value *EltAddr =
        CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
    Fn(Address(EltAddr, EltAlign));
  }
}
void CodeGenFunction::ExpandTypeFromArgs(
    QualType Ty, LValue LV, SmallVectorImpl<llvm::Argument *>::iterator &AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    forConstantArrayExpansion(*this, CAExp, LV.getAddress(),
                              [&](Address EltAddr) {
      LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
      ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
    });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = LV.getAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      LValue SubLV = MakeAddrLValue(Base, BS->getType());

      // Recurse onto bases.
      ExpandTypeFromArgs(BS->getType(), SubLV, AI);
    }
    for (auto FD : RExp->Fields) {
      // FIXME: What are the right qualifiers here?
      LValue SubLV = EmitLValueForField(LV, FD);
      ExpandTypeFromArgs(FD->getType(), SubLV, AI);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    auto realValue = *AI++;
    auto imagValue = *AI++;
    EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    EmitStoreThroughLValue(RValue::get(*AI++), LV);
  }
}
void CodeGenFunction::ExpandTypeToArgs(
    QualType Ty, RValue RV, llvm::FunctionType *IRFuncTy,
    SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    forConstantArrayExpansion(*this, CAExp, RV.getAggregateAddress(),
                              [&](Address EltAddr) {
      RValue EltRV =
          convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation());
      ExpandTypeToArgs(CAExp->EltTy, EltRV, IRFuncTy, IRCallArgs, IRCallArgPos);
    });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = RV.getAggregateAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      RValue BaseRV = RValue::getAggregate(Base);

      // Recurse onto bases.
      ExpandTypeToArgs(BS->getType(), BaseRV, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }

    LValue LV = MakeAddrLValue(This, Ty);
    for (auto FD : RExp->Fields) {
      RValue FldRV = EmitRValueForField(LV, FD, SourceLocation());
      ExpandTypeToArgs(FD->getType(), FldRV, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    ComplexPairTy CV = RV.getComplexVal();
    IRCallArgs[IRCallArgPos++] = CV.first;
    IRCallArgs[IRCallArgPos++] = CV.second;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
    llvm::Value *V = RV.getScalarVal();
    if (IRCallArgPos < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(IRCallArgPos))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));

    IRCallArgs[IRCallArgPos++] = V;
  }
}
/// Create a temporary allocation for the purposes of coercion.
static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
                                           CharUnits MinAlign) {
  // Don't use an alignment that's worse than what LLVM would prefer.
  auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
  CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));

  return CGF.CreateTempAlloca(Ty, Align);
}
/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
/// accessing some number of bytes out of, try to gep into the struct to get
/// at its inner goodness. Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
static Address
EnterStructPointerForCoercedAccess(Address SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it. The
  // comparison must be made on the store size and not the alloca size. Using
  // the alloca size may overstate the size of the load.
  uint64_t FirstEltSize =
      CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, CharUnits(), "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy = SrcPtr.getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}
/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers. This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
///
/// This behaves as if the value were coerced through memory, so on big-endian
/// targets the high bits are preserved in a truncation, while little-endian
/// targets preserve the low bits.
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy) {
    const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
    if (DL.isBigEndian()) {
      // Preserve the high bits on big-endian targets.
      // That is what memory coercion does.
      uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);

      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
      } else {
        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
      }
    } else {
      // Little-endian targets preserve the low bits. No shifts required.
      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
    }
  }

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}
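// Endianness example for the shifts above: coercing an i64 holding
// 0xAABBCCDD11223344 down to i32 yields 0x11223344 on a little-endian
// target, but on a big-endian target the value is shifted right by 32 first
// so that 0xAABBCCDD survives the truncation - exactly what a store followed
// by a narrower load at the same address would produce.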
/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty, known to be aligned to
/// \arg SrcAlign bytes.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src.getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(Src);

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, DstSize, CGF);
    SrcTy = Src.getType()->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::Value *Load = CGF.Builder.CreateLoad(Src);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Src = CGF.Builder.CreateBitCast(Src, llvm::PointerType::getUnqual(Ty));
    return CGF.Builder.CreateLoad(Src);
  }

  // Otherwise do coercion through memory. This is stupid, but simple.
  Address Tmp = CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment());
  Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.Int8PtrTy);
  Address SrcCasted = CGF.Builder.CreateBitCast(Src, CGF.Int8PtrTy);
  CGF.Builder.CreateMemCpy(Casted, SrcCasted,
                           llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
                           false);
  return CGF.Builder.CreateLoad(Tmp);
}
// Function to store a first-class aggregate into memory. We prefer to
// store the elements rather than the aggregate to be more friendly to
// the optimizer.
// FIXME: Do we need to recurse here?
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
                          Address Dest, bool DestIsVolatile) {
  // Prefer scalar stores to first-class aggregate stores.
  if (llvm::StructType *STy =
          dyn_cast<llvm::StructType>(Val->getType())) {
    const llvm::StructLayout *Layout =
        CGF.CGM.getDataLayout().getStructLayout(STy);

    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      auto EltOffset = CharUnits::fromQuantity(Layout->getElementOffset(i));
      Address EltPtr = CGF.Builder.CreateStructGEP(Dest, i, EltOffset);
      llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
      CGF.Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
    }
  } else {
    CGF.Builder.CreateStore(Val, Dest, DestIsVolatile);
  }
}
/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types. The
/// destination is known to be aligned to \arg DstAlign bytes.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               Address Dst,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst.getType()->getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize, CGF);
    DstTy = Dst.getType()->getElementType();
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    Dst = CGF.Builder.CreateBitCast(Dst, llvm::PointerType::getUnqual(SrcTy));
    BuildAggStore(CGF, Src, Dst, DstIsVolatile);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
    CGF.Builder.CreateStore(Src, Tmp);
    Address Casted = CGF.Builder.CreateBitCast(Tmp, CGF.Int8PtrTy);
    Address DstCasted = CGF.Builder.CreateBitCast(Dst, CGF.Int8PtrTy);
    CGF.Builder.CreateMemCpy(DstCasted, Casted,
                             llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
                             false);
  }
}
static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
                                   const ABIArgInfo &info) {
  if (unsigned offset = info.getDirectOffset()) {
    addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
    addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
                                           CharUnits::fromQuantity(offset));
    addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
  }
  return addr;
}
namespace {

/// Encapsulates information about the way function arguments from
/// CGFunctionInfo should be passed to actual LLVM IR function.
class ClangToLLVMArgMapping {
  static const unsigned InvalidIndex = ~0U;
  unsigned InallocaArgNo;
  unsigned SRetArgNo;
  unsigned TotalIRArgs;

  /// Arguments of LLVM IR function corresponding to single Clang argument.
  struct IRArgs {
    unsigned PaddingArgIndex;
    // Argument is expanded to IR arguments at positions
    // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
    unsigned FirstArgIndex;
    unsigned NumberOfArgs;

    IRArgs()
        : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
          NumberOfArgs(0) {}
  };

  SmallVector<IRArgs, 8> ArgInfo;

public:
  ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
                        bool OnlyRequiredArgs = false)
      : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
        ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
    construct(Context, FI, OnlyRequiredArgs);
  }

  bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
  unsigned getInallocaArgNo() const {
    assert(hasInallocaArg());
    return InallocaArgNo;
  }

  bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
  unsigned getSRetArgNo() const {
    assert(hasSRetArg());
    return SRetArgNo;
  }

  unsigned totalIRArgs() const { return TotalIRArgs; }

  bool hasPaddingArg(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
  }
  unsigned getPaddingArgNo(unsigned ArgNo) const {
    assert(hasPaddingArg(ArgNo));
    return ArgInfo[ArgNo].PaddingArgIndex;
  }

  /// Returns index of first IR argument corresponding to ArgNo, and their
  /// quantity.
  std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
                          ArgInfo[ArgNo].NumberOfArgs);
  }

private:
  void construct(const ASTContext &Context, const CGFunctionInfo &FI,
                 bool OnlyRequiredArgs);
};
void ClangToLLVMArgMapping::construct(const ASTContext &Context,
                                      const CGFunctionInfo &FI,
                                      bool OnlyRequiredArgs) {
  unsigned IRArgNo = 0;
  bool SwapThisWithSRet = false;
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  if (RetAI.getKind() == ABIArgInfo::Indirect) {
    SwapThisWithSRet = RetAI.isSRetAfterThis();
    SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
  }

  unsigned ArgNo = 0;
  unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
       ++I, ++ArgNo) {
    assert(I != FI.arg_end());
    QualType ArgType = I->type;
    const ABIArgInfo &AI = I->info;
    // Collect data about IR arguments corresponding to Clang argument ArgNo.
    auto &IRArgs = ArgInfo[ArgNo];

    if (AI.getPaddingType())
      IRArgs.PaddingArgIndex = IRArgNo++;

    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // FIXME: handle sseregparm someday...
      llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
      if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
        IRArgs.NumberOfArgs = STy->getNumElements();
      } else {
        IRArgs.NumberOfArgs = 1;
      }
      break;
    }
    case ABIArgInfo::Indirect:
      IRArgs.NumberOfArgs = 1;
      break;
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      // ignore and inalloca don't have matching LLVM parameters.
      IRArgs.NumberOfArgs = 0;
      break;
    case ABIArgInfo::Expand: {
      IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
      break;
    }
    }

    if (IRArgs.NumberOfArgs > 0) {
      IRArgs.FirstArgIndex = IRArgNo;
      IRArgNo += IRArgs.NumberOfArgs;
    }

    // Skip over the sret parameter when it comes second. We already handled it
    // above.
    if (IRArgNo == 1 && SwapThisWithSRet)
      IRArgNo++;
  }
  assert(ArgNo == ArgInfo.size());

  if (FI.usesInAlloca())
    InallocaArgNo = IRArgNo++;

  TotalIRArgs = IRArgNo;
}
}  // namespace
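// Illustrative mapping: for an instance method whose aggregate return is
// passed indirectly with isSRetAfterThis() (as the Microsoft C++ ABI does),
// the IR parameter order is (this, sret, args...); SRetArgNo is fixed at 1
// and the loop above skips that slot when numbering the remaining IR
// arguments.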
bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
  return ReturnTypeUsesSRet(FI) &&
         getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
}

bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    default:
      return false;
    case BuiltinType::Float:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
    case BuiltinType::Double:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
    case BuiltinType::LongDouble:
      return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
    }
  }

  return false;
}

bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
  if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
    if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
      if (BT->getKind() == BuiltinType::LongDouble)
        return getTarget().useObjCFP2RetForComplexLongDouble();
    }
  }

  return false;
}
llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
  return GetFunctionType(FI);
}

llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {

  bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
  (void)Inserted;
  assert(Inserted && "Recursively being processed?");

  llvm::Type *resultType = nullptr;
  const ABIArgInfo &retAI = FI.getReturnInfo();
  switch (retAI.getKind()) {
  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    resultType = retAI.getCoerceToType();
    break;

  case ABIArgInfo::InAlloca:
    if (retAI.getInAllocaSRet()) {
      // sret things on win32 aren't void, they return the sret pointer.
      QualType ret = FI.getReturnType();
      llvm::Type *ty = ConvertType(ret);
      unsigned addressSpace = Context.getTargetAddressSpace(ret);
      resultType = llvm::PointerType::get(ty, addressSpace);
    } else {
      resultType = llvm::Type::getVoidTy(getLLVMContext());
    }
    break;

  case ABIArgInfo::Indirect:
  case ABIArgInfo::Ignore:
    resultType = llvm::Type::getVoidTy(getLLVMContext());
    break;
  }

  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
  SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());

  // Add type for sret argument.
  if (IRFunctionArgs.hasSRetArg()) {
    QualType Ret = FI.getReturnType();
    llvm::Type *Ty = ConvertType(Ret);
    unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
    ArgTypes[IRFunctionArgs.getSRetArgNo()] =
        llvm::PointerType::get(Ty, AddressSpace);
  }

  // Add type for inalloca argument.
  if (IRFunctionArgs.hasInallocaArg()) {
    auto ArgStruct = FI.getArgStruct();
    assert(ArgStruct);
    ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
  }

  // Add in all of the required arguments.
  unsigned ArgNo = 0;
  CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
                                     ie = it + FI.getNumRequiredArgs();
  for (; it != ie; ++it, ++ArgNo) {
    const ABIArgInfo &ArgInfo = it->info;

    // Insert a padding type to ensure proper alignment.
    if (IRFunctionArgs.hasPaddingArg(ArgNo))
      ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
          ArgInfo.getPaddingType();

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      assert(NumIRArgs == 0);
      break;

    case ABIArgInfo::Indirect: {
      assert(NumIRArgs == 1);
      // indirect arguments are always on the stack, which is addr space #0.
      llvm::Type *LTy = ConvertTypeForMem(it->type);
      ArgTypes[FirstIRArg] = LTy->getPointerTo();
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // Fast-isel and the optimizer generally like scalar values better than
      // FCAs, so we flatten them if this is safe to do for this argument.
      llvm::Type *argType = ArgInfo.getCoerceToType();
      llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
      if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
        assert(NumIRArgs == st->getNumElements());
        for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
          ArgTypes[FirstIRArg + i] = st->getElementType(i);
      } else {
        assert(NumIRArgs == 1);
        ArgTypes[FirstIRArg] = argType;
      }
      break;
    }

    case ABIArgInfo::Expand:
      auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
      getExpandedTypes(it->type, ArgTypesIter);
      assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
      break;
    }
  }

  bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
  assert(Erased && "Not in set?");

  return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
}
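// End-to-end sketch (target-dependent, illustrative): for
//   struct Big { int a[10]; };
//   struct Big make(int n);
// an x86-64 target classifies the return as Indirect, so this function
// produces the IR function type
//   void (%struct.Big*, i32)
// with a void result and the sret attribute attached to the first parameter.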
llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  if (!isFuncTypeConvertible(FPT))
    return llvm::StructType::get(getLLVMContext());

  const CGFunctionInfo *Info;
  if (isa<CXXDestructorDecl>(MD))
    Info =
        &arrangeCXXStructorDeclaration(MD, getFromDtorType(GD.getDtorType()));
  else
    Info = &arrangeCXXMethodDeclaration(MD);
  return GetFunctionType(*Info);
}
static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
                                               llvm::AttrBuilder &FuncAttrs,
                                               const FunctionProtoType *FPT) {
  if (!FPT)
    return;

  if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
      FPT->isNothrow(Ctx))
    FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
}
void CodeGenModule::ConstructAttributeList(
    StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo,
    AttributeListType &PAL, unsigned &CallingConv, bool AttrOnCallSite) {
  llvm::AttrBuilder FuncAttrs;
  llvm::AttrBuilder RetAttrs;
  bool HasOptnone = false;

  CallingConv = FI.getEffectiveCallingConvention();

  if (FI.isNoReturn())
    FuncAttrs.addAttribute(llvm::Attribute::NoReturn);

  // If we have information about the function prototype, we can learn
  // attributes from there.
  AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
                                     CalleeInfo.getCalleeFunctionProtoType());

  const Decl *TargetDecl = CalleeInfo.getCalleeDecl();

  // FIXME: handle sseregparm someday...
  if (TargetDecl) {
    if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
    if (TargetDecl->hasAttr<NoDuplicateAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);

    if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
      AddAttributesFromFunctionProtoType(
          getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
      // Don't use [[noreturn]] or _Noreturn for a call to a virtual function.
      // These attributes are not inherited by overloads.
      const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
      if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual()))
        FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
    }

    // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
    if (TargetDecl->hasAttr<ConstAttr>()) {
      FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    } else if (TargetDecl->hasAttr<PureAttr>()) {
      FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
      FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    }
    if (TargetDecl->hasAttr<RestrictAttr>())
      RetAttrs.addAttribute(llvm::Attribute::NoAlias);
    if (TargetDecl->hasAttr<ReturnsNonNullAttr>())
      RetAttrs.addAttribute(llvm::Attribute::NonNull);

    HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
  }

  // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
  if (!HasOptnone) {
    if (CodeGenOpts.OptimizeSize)
      FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
    if (CodeGenOpts.OptimizeSize == 2)
      FuncAttrs.addAttribute(llvm::Attribute::MinSize);
  }

  if (CodeGenOpts.DisableRedZone)
    FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
  if (CodeGenOpts.NoImplicitFloat)
    FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
  if (CodeGenOpts.EnableSegmentedStacks &&
      !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>()))
    FuncAttrs.addAttribute("split-stack");

  if (AttrOnCallSite) {
    // Attributes that should go on the call site only.
    if (!CodeGenOpts.SimplifyLibCalls ||
        CodeGenOpts.isNoBuiltinFunc(Name.data()))
      FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
    if (!CodeGenOpts.TrapFuncName.empty())
      FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
  } else {
    // Attributes that should go on the function, but not the call site.
    if (!CodeGenOpts.DisableFPElim) {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
    } else if (CodeGenOpts.OmitLeafFramePointer) {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
      FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
    } else {
      FuncAttrs.addAttribute("no-frame-pointer-elim", "true");
      FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
    }

    bool DisableTailCalls =
        CodeGenOpts.DisableTailCalls ||
        (TargetDecl && TargetDecl->hasAttr<DisableTailCallsAttr>());
    FuncAttrs.addAttribute("disable-tail-calls",
                           llvm::toStringRef(DisableTailCalls));

    FuncAttrs.addAttribute("less-precise-fpmad",
                           llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
    FuncAttrs.addAttribute("no-infs-fp-math",
                           llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
    FuncAttrs.addAttribute("no-nans-fp-math",
                           llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
    FuncAttrs.addAttribute("unsafe-fp-math",
                           llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
    FuncAttrs.addAttribute("use-soft-float",
                           llvm::toStringRef(CodeGenOpts.SoftFloat));
    FuncAttrs.addAttribute("stack-protector-buffer-size",
                           llvm::utostr(CodeGenOpts.SSPBufferSize));

    if (CodeGenOpts.StackRealignment)
      FuncAttrs.addAttribute("stackrealign");
    // Add target-cpu and target-features attributes to functions. If
    // we have a decl for the function and it has a target attribute then
    // parse that and add it to the feature set.
    StringRef TargetCPU = getTarget().getTargetOpts().CPU;
    const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl);
    if (FD && FD->hasAttr<TargetAttr>()) {
      llvm::StringMap<bool> FeatureMap;
      getFunctionFeatureMap(FeatureMap, FD);

      // Produce the canonical string for this set of features.
      std::vector<std::string> Features;
      for (llvm::StringMap<bool>::const_iterator it = FeatureMap.begin(),
                                                 ie = FeatureMap.end();
           it != ie; ++it)
        Features.push_back((it->second ? "+" : "-") + it->first().str());

      // Now add the target-cpu and target-features to the function.
      // While we populated the feature map above, we still need to
      // get and parse the target attribute so we can get the cpu for
      // the function.
      const auto *TD = FD->getAttr<TargetAttr>();
      TargetAttr::ParsedTargetAttr ParsedAttr = TD->parse();
      if (ParsedAttr.second != "")
        TargetCPU = ParsedAttr.second;
      if (TargetCPU != "")
        FuncAttrs.addAttribute("target-cpu", TargetCPU);
      if (!Features.empty()) {
        std::sort(Features.begin(), Features.end());
        FuncAttrs.addAttribute(
            "target-features",
            llvm::join(Features.begin(), Features.end(), ","));
      }
    } else {
      // Otherwise just add the existing target cpu and target features to the
      // function.
      std::vector<std::string> &Features = getTarget().getTargetOpts().Features;
      if (TargetCPU != "")
        FuncAttrs.addAttribute("target-cpu", TargetCPU);
      if (!Features.empty()) {
        std::sort(Features.begin(), Features.end());
        FuncAttrs.addAttribute(
            "target-features",
            llvm::join(Features.begin(), Features.end(), ","));
      }
    }
  }

  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
1599 QualType RetTy = FI.getReturnType();
1600 const ABIArgInfo &RetAI = FI.getReturnInfo();
1601 switch (RetAI.getKind()) {
1602 case ABIArgInfo::Extend:
1603 if (RetTy->hasSignedIntegerRepresentation())
1604 RetAttrs.addAttribute(llvm::Attribute::SExt);
1605 else if (RetTy->hasUnsignedIntegerRepresentation())
1606 RetAttrs.addAttribute(llvm::Attribute::ZExt);
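// e.g. a C function declared 'signed char f(void)' is emitted as
// 'signext i8 @f()', so callers may assume the value has already been
// sign-extended to the target's register width.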
1607 // FALL THROUGH
1608 case ABIArgInfo::Direct:
1609 if (RetAI.getInReg())
1610 RetAttrs.addAttribute(llvm::Attribute::InReg);
1611 break;
1612 case ABIArgInfo::Ignore:
1613 break;
1615 case ABIArgInfo::InAlloca:
1616 case ABIArgInfo::Indirect: {
1617 // inalloca and sret disable readnone and readonly
1618 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1619 .removeAttribute(llvm::Attribute::ReadNone);
1620 break;
1621 }
1623 case ABIArgInfo::Expand:
1624 llvm_unreachable("Invalid ABI kind for return argument");
1625 }
1627 if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
1628 QualType PTy = RefTy->getPointeeType();
1629 if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
1630 RetAttrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
1631 .getQuantity());
1632 else if (getContext().getTargetAddressSpace(PTy) == 0)
1633 RetAttrs.addAttribute(llvm::Attribute::NonNull);
1634 }
1636 // Attach return attributes.
1637 if (RetAttrs.hasAttributes()) {
1638 PAL.push_back(llvm::AttributeSet::get(
1639 getLLVMContext(), llvm::AttributeSet::ReturnIndex, RetAttrs));
1640 }
1642 // Attach attributes to sret.
1643 if (IRFunctionArgs.hasSRetArg()) {
1644 llvm::AttrBuilder SRETAttrs;
1645 SRETAttrs.addAttribute(llvm::Attribute::StructRet);
1646 if (RetAI.getInReg())
1647 SRETAttrs.addAttribute(llvm::Attribute::InReg);
1648 PAL.push_back(llvm::AttributeSet::get(
1649 getLLVMContext(), IRFunctionArgs.getSRetArgNo() + 1, SRETAttrs));
1650 }
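// e.g. 'struct S f(void)' with an indirect return is emitted roughly as:
//   define void @f(%struct.S* noalias sret %agg.result)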
1652 // Attach attributes to inalloca argument.
1653 if (IRFunctionArgs.hasInallocaArg()) {
1654 llvm::AttrBuilder Attrs;
1655 Attrs.addAttribute(llvm::Attribute::InAlloca);
1656 PAL.push_back(llvm::AttributeSet::get(
1657 getLLVMContext(), IRFunctionArgs.getInallocaArgNo() + 1, Attrs));
1658 }
1660 unsigned ArgNo = 0;
1661 for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
1662 E = FI.arg_end();
1663 I != E; ++I, ++ArgNo) {
1664 QualType ParamType = I->type;
1665 const ABIArgInfo &AI = I->info;
1666 llvm::AttrBuilder Attrs;
1668 // Add attribute for padding argument, if necessary.
1669 if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
1670 if (AI.getPaddingInReg())
1671 PAL.push_back(llvm::AttributeSet::get(
1672 getLLVMContext(), IRFunctionArgs.getPaddingArgNo(ArgNo) + 1,
1673 llvm::Attribute::InReg));
1674 }
1676 // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
1677 // have the corresponding parameter variable. It doesn't make
1678 // sense to do it here because parameters are so messed up.
1679 switch (AI.getKind()) {
1680 case ABIArgInfo::Extend:
1681 if (ParamType->isSignedIntegerOrEnumerationType())
1682 Attrs.addAttribute(llvm::Attribute::SExt);
1683 else if (ParamType->isUnsignedIntegerOrEnumerationType()) {
1684 if (getTypes().getABIInfo().shouldSignExtUnsignedType(ParamType))
1685 Attrs.addAttribute(llvm::Attribute::SExt);
1686 else
1687 Attrs.addAttribute(llvm::Attribute::ZExt);
1688 }
1689 // FALL THROUGH
1690 case ABIArgInfo::Direct:
1691 if (ArgNo == 0 && FI.isChainCall())
1692 Attrs.addAttribute(llvm::Attribute::Nest);
1693 else if (AI.getInReg())
1694 Attrs.addAttribute(llvm::Attribute::InReg);
1695 break;
1697 case ABIArgInfo::Indirect: {
1698 if (AI.getInReg())
1699 Attrs.addAttribute(llvm::Attribute::InReg);
1701 if (AI.getIndirectByVal())
1702 Attrs.addAttribute(llvm::Attribute::ByVal);
1704 CharUnits Align = AI.getIndirectAlign();
1706 // In a byval argument, it is important that the required
1707 // alignment of the type is honored, as LLVM might be creating a
1708 // *new* stack object, and needs to know what alignment to give
1709 // it. (Sometimes it can deduce a sensible alignment on its own,
1710 // but not if clang decides it must emit a packed struct, or the
1711 // user specifies increased alignment requirements.)
1713 // This is different from indirect *not* byval, where the object
1714 // exists already, and the align attribute is purely
1715 // informative.
1716 assert(!Align.isZero());
1718 // For now, only add this when we have a byval argument.
1719 // TODO: be less lazy about updating test cases.
1720 if (AI.getIndirectByVal())
1721 Attrs.addAlignmentAttr(Align.getQuantity());
1723 // byval disables readnone and readonly.
1724 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1725 .removeAttribute(llvm::Attribute::ReadNone);
1726 break;
1727 }
1728 case ABIArgInfo::Ignore:
1729 case ABIArgInfo::Expand:
1730 continue;
1732 case ABIArgInfo::InAlloca:
1733 // inalloca disables readnone and readonly.
1734 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1735 .removeAttribute(llvm::Attribute::ReadNone);
1736 continue;
1737 }
1739 if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
1740 QualType PTy = RefTy->getPointeeType();
1741 if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
1742 Attrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
1743 .getQuantity());
1744 else if (getContext().getTargetAddressSpace(PTy) == 0)
1745 Attrs.addAttribute(llvm::Attribute::NonNull);
1746 }
1748 if (Attrs.hasAttributes()) {
1749 unsigned FirstIRArg, NumIRArgs;
1750 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1751 for (unsigned i = 0; i < NumIRArgs; i++)
1752 PAL.push_back(llvm::AttributeSet::get(getLLVMContext(),
1753 FirstIRArg + i + 1, Attrs));
1754 }
1755 }
1756 assert(ArgNo == FI.arg_size());
1758 if (FuncAttrs.hasAttributes())
1759 PAL.push_back(llvm::
1760 AttributeSet::get(getLLVMContext(),
1761 llvm::AttributeSet::FunctionIndex,
1762 FuncAttrs));
1763 }
1765 /// An argument came in as a promoted argument; demote it back to its
1766 /// declared type.
1767 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
1768 const VarDecl *var,
1769 llvm::Value *value) {
1770 llvm::Type *varType = CGF.ConvertType(var->getType());
1772 // This can happen with promotions that actually don't change the
1773 // underlying type, like the enum promotions.
1774 if (value->getType() == varType) return value;
1776 assert((varType->isIntegerTy() || varType->isFloatingPointTy())
1777 && "unexpected promotion type");
1779 if (isa<llvm::IntegerType>(varType))
1780 return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
1782 return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
1783 }
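// e.g. a K&R-style 'float' parameter is promoted to 'double' at the ABI
// level, so the prolog narrows it back with an fptrunc; integer promotions
// (char/short to int) are undone with a trunc instead.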
1785 /// Returns the attribute (either parameter attribute, or function
1786 /// attribute), which declares argument ArgNo to be non-null.
1787 static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
1788 QualType ArgType, unsigned ArgNo) {
1789 // FIXME: __attribute__((nonnull)) can also be applied to:
1790 // - references to pointers, where the pointee is known to be
1791 // nonnull (apparently a Clang extension)
1792 // - transparent unions containing pointers
1793 // In the former case, LLVM IR cannot represent the constraint. In
1794 // the latter case, we have no guarantee that the transparent union
1795 // is in fact passed as a pointer.
1796 if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
1797 return nullptr;
1798 // First, check attribute on parameter itself.
1799 if (PVD) {
1800 if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
1801 return ParmNNAttr;
1802 }
1803 // Check function attributes.
1804 if (!FD)
1805 return nullptr;
1806 for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
1807 if (NNAttr->isNonNull(ArgNo))
1808 return NNAttr;
1809 }
1810 return nullptr;
1811 }
1813 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
1814 llvm::Function *Fn,
1815 const FunctionArgList &Args) {
1816 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
1817 // Naked functions don't have prologues.
1818 return;
1820 // If this is an implicit-return-zero function, go ahead and
1821 // initialize the return value. TODO: it might be nice to have
1822 // a more general mechanism for this that didn't require synthesized
1823 // return statements.
1824 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
1825 if (FD->hasImplicitReturnZero()) {
1826 QualType RetTy = FD->getReturnType().getUnqualifiedType();
1827 llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
1828 llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
1829 Builder.CreateStore(Zero, ReturnValue);
1830 }
1831 }
1833 // FIXME: We no longer need the types from FunctionArgList; lift up and
1834 // simplify.
1836 ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
1837 // Flattened function arguments.
1838 SmallVector<llvm::Argument *, 16> FnArgs;
1839 FnArgs.reserve(IRFunctionArgs.totalIRArgs());
1840 for (auto &Arg : Fn->args()) {
1841 FnArgs.push_back(&Arg);
1842 }
1843 assert(FnArgs.size() == IRFunctionArgs.totalIRArgs());
1845 // If we're using inalloca, all the memory arguments are GEPs off of the last
1846 // parameter, which is a pointer to the complete memory area.
1847 Address ArgStruct = Address::invalid();
1848 const llvm::StructLayout *ArgStructLayout = nullptr;
1849 if (IRFunctionArgs.hasInallocaArg()) {
1850 ArgStructLayout = CGM.getDataLayout().getStructLayout(FI.getArgStruct());
1851 ArgStruct = Address(FnArgs[IRFunctionArgs.getInallocaArgNo()],
1852 FI.getArgStructAlignment());
1854 assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
1855 }
1857 // Name the struct return parameter.
1858 if (IRFunctionArgs.hasSRetArg()) {
1859 auto AI = FnArgs[IRFunctionArgs.getSRetArgNo()];
1860 AI->setName("agg.result");
1861 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), AI->getArgNo() + 1,
1862 llvm::Attribute::NoAlias));
1863 }
1865 // Track if we received the parameter as a pointer (indirect, byval, or
1866 inalloca). If we already have a pointer, EmitParmDecl doesn't need to copy it
1867 // into a local alloca for us.
1868 SmallVector<ParamValue, 16> ArgVals;
1869 ArgVals.reserve(Args.size());
1871 // Create a pointer value for every parameter declaration. This usually
1872 // entails copying one or more LLVM IR arguments into an alloca. Don't push
1873 // any cleanups or do anything that might unwind. We do that separately, so
1874 // we can push the cleanups in the correct order for the ABI.
1875 assert(FI.arg_size() == Args.size() &&
1876 "Mismatch between function signature & arguments.");
1877 unsigned ArgNo = 0;
1878 CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
1879 for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
1880 i != e; ++i, ++info_it, ++ArgNo) {
1881 const VarDecl *Arg = *i;
1882 QualType Ty = info_it->type;
1883 const ABIArgInfo &ArgI = info_it->info;
1885 bool isPromoted =
1886 isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
1888 unsigned FirstIRArg, NumIRArgs;
1889 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1891 switch (ArgI.getKind()) {
1892 case ABIArgInfo::InAlloca: {
1893 assert(NumIRArgs == 0);
1894 auto FieldIndex = ArgI.getInAllocaFieldIndex();
1895 CharUnits FieldOffset =
1896 CharUnits::fromQuantity(ArgStructLayout->getElementOffset(FieldIndex));
1897 Address V = Builder.CreateStructGEP(ArgStruct, FieldIndex, FieldOffset,
1898 Arg->getName());
1899 ArgVals.push_back(ParamValue::forIndirect(V));
1900 break;
1901 }
1903 case ABIArgInfo::Indirect: {
1904 assert(NumIRArgs == 1);
1905 Address ParamAddr = Address(FnArgs[FirstIRArg], ArgI.getIndirectAlign());
1907 if (!hasScalarEvaluationKind(Ty)) {
1908 // Aggregates and complex variables are accessed by reference. All we
1909 // need to do is realign the value, if requested.
1910 Address V = ParamAddr;
1911 if (ArgI.getIndirectRealign()) {
1912 Address AlignedTemp = CreateMemTemp(Ty, "coerce");
1914 // Copy from the incoming argument pointer to the temporary with the
1915 // appropriate alignment.
1917 // FIXME: We should have a common utility for generating an aggregate
1918 // copy.
1919 CharUnits Size = getContext().getTypeSizeInChars(Ty);
1920 auto SizeVal = llvm::ConstantInt::get(IntPtrTy, Size.getQuantity());
1921 Address Dst = Builder.CreateBitCast(AlignedTemp, Int8PtrTy);
1922 Address Src = Builder.CreateBitCast(ParamAddr, Int8PtrTy);
1923 Builder.CreateMemCpy(Dst, Src, SizeVal, false);
1924 V = AlignedTemp;
1925 }
1926 ArgVals.push_back(ParamValue::forIndirect(V));
1927 } else {
1928 // Load scalar value from indirect argument.
1929 llvm::Value *V =
1930 EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getLocStart());
1932 if (isPromoted)
1933 V = emitArgumentDemotion(*this, Arg, V);
1934 ArgVals.push_back(ParamValue::forDirect(V));
1935 }
1936 break;
1937 }
1939 case ABIArgInfo::Extend:
1940 case ABIArgInfo::Direct: {
1942 // If we have the trivial case, handle it with no muss and fuss.
1943 if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
1944 ArgI.getCoerceToType() == ConvertType(Ty) &&
1945 ArgI.getDirectOffset() == 0) {
1946 assert(NumIRArgs == 1);
1947 auto AI = FnArgs[FirstIRArg];
1948 llvm::Value *V = AI;
1950 if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
1951 if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
1952 PVD->getFunctionScopeIndex()))
1953 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
1954 AI->getArgNo() + 1,
1955 llvm::Attribute::NonNull));
1957 QualType OTy = PVD->getOriginalType();
1958 if (const auto *ArrTy =
1959 getContext().getAsConstantArrayType(OTy)) {
1960 // A C99 array parameter declaration with the static keyword also
1961 // indicates dereferenceability, and if the size is constant we can
1962 // use the dereferenceable attribute (which requires the size in
1963 // bytes).
1964 if (ArrTy->getSizeModifier() == ArrayType::Static) {
1965 QualType ETy = ArrTy->getElementType();
1966 uint64_t ArrSize = ArrTy->getSize().getZExtValue();
1967 if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
1968 ArrSize) {
1969 llvm::AttrBuilder Attrs;
1970 Attrs.addDereferenceableAttr(
1971 getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize);
1972 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
1973 AI->getArgNo() + 1, Attrs));
1974 } else if (getContext().getTargetAddressSpace(ETy) == 0) {
1975 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
1976 AI->getArgNo() + 1,
1977 llvm::Attribute::NonNull));
1978 }
1979 }
1980 } else if (const auto *ArrTy =
1981 getContext().getAsVariableArrayType(OTy)) {
1982 // For C99 VLAs with the static keyword, we don't know the size so
1983 // we can't use the dereferenceable attribute, but in addrspace(0)
1984 // we know that it must be nonnull.
1985 if (ArrTy->getSizeModifier() == VariableArrayType::Static &&
1986 !getContext().getTargetAddressSpace(ArrTy->getElementType()))
1987 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
1988 AI->getArgNo() + 1,
1989 llvm::Attribute::NonNull));
1990 }
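// e.g. 'void f(int a[static 4])' marks the argument roughly as
//   i32* dereferenceable(16) %a   (16 = 4 elements * 4 bytes)
// whereas a VLA 'int a[static n]' can only be marked nonnull, as above.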
1992 const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
1993 if (!AVAttr)
1994 if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
1995 AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
1996 if (AVAttr) {
1997 llvm::Value *AlignmentValue =
1998 EmitScalarExpr(AVAttr->getAlignment());
1999 llvm::ConstantInt *AlignmentCI =
2000 cast<llvm::ConstantInt>(AlignmentValue);
2001 unsigned Alignment =
2002 std::min((unsigned) AlignmentCI->getZExtValue(),
2003 +llvm::Value::MaximumAlignment);
2005 llvm::AttrBuilder Attrs;
2006 Attrs.addAlignmentAttr(Alignment);
2007 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
2008 AI->getArgNo() + 1, Attrs));
2009 }
2010 }
2012 if (Arg->getType().isRestrictQualified())
2013 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
2014 AI->getArgNo() + 1,
2015 llvm::Attribute::NoAlias));
2017 // Ensure the argument is the correct type.
2018 if (V->getType() != ArgI.getCoerceToType())
2019 V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
2021 if (isPromoted)
2022 V = emitArgumentDemotion(*this, Arg, V);
2024 if (const CXXMethodDecl *MD =
2025 dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl)) {
2026 if (MD->isVirtual() && Arg == CXXABIThisDecl)
2027 V = CGM.getCXXABI().
2028 adjustThisParameterInVirtualFunctionPrologue(*this, CurGD, V);
2029 }
2031 // Because of merging of function types from multiple decls it is
2032 // possible for the type of an argument to not match the corresponding
2033 // type in the function type. Since we are codegening the callee
2034 // in here, add a cast to the argument type.
2035 llvm::Type *LTy = ConvertType(Arg->getType());
2036 if (V->getType() != LTy)
2037 V = Builder.CreateBitCast(V, LTy);
2039 ArgVals.push_back(ParamValue::forDirect(V));
2040 break;
2041 }
2043 Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
2044 Arg->getName());
2046 // Pointer to store into.
2047 Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI);
2049 // Fast-isel and the optimizer generally like scalar values better than
2050 // FCAs, so we flatten them if this is safe to do for this argument.
2051 llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
2052 if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
2053 STy->getNumElements() > 1) {
2054 auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
2055 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
2056 llvm::Type *DstTy = Ptr.getElementType();
2057 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
2059 Address AddrToStoreInto = Address::invalid();
2060 if (SrcSize <= DstSize) {
2061 AddrToStoreInto =
2062 Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
2063 } else {
2064 AddrToStoreInto =
2065 CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
2066 }
2068 assert(STy->getNumElements() == NumIRArgs);
2069 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2070 auto AI = FnArgs[FirstIRArg + i];
2071 AI->setName(Arg->getName() + ".coerce" + Twine(i));
2072 auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
2073 Address EltPtr =
2074 Builder.CreateStructGEP(AddrToStoreInto, i, Offset);
2075 Builder.CreateStore(AI, EltPtr);
2076 }
2078 if (SrcSize > DstSize) {
2079 Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
2080 }
2082 } else {
2083 // Simple case, just do a coerced store of the argument into the alloca.
2084 assert(NumIRArgs == 1);
2085 auto AI = FnArgs[FirstIRArg];
2086 AI->setName(Arg->getName() + ".coerce");
2087 CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, *this);
2088 }
2090 // Match to what EmitParmDecl is expecting for this type.
2091 if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
2092 llvm::Value *V =
2093 EmitLoadOfScalar(Alloca, false, Ty, Arg->getLocStart());
2094 if (isPromoted)
2095 V = emitArgumentDemotion(*this, Arg, V);
2096 ArgVals.push_back(ParamValue::forDirect(V));
2097 } else {
2098 ArgVals.push_back(ParamValue::forIndirect(Alloca));
2099 }
2100 break;
2101 }
2103 case ABIArgInfo::Expand: {
2104 // If this structure was expanded into multiple arguments then
2105 // we need to create a temporary and reconstruct it from the
2106 // arguments.
2107 Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2108 LValue LV = MakeAddrLValue(Alloca, Ty);
2109 ArgVals.push_back(ParamValue::forIndirect(Alloca));
2111 auto FnArgIter = FnArgs.begin() + FirstIRArg;
2112 ExpandTypeFromArgs(Ty, LV, FnArgIter);
2113 assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs);
2114 for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
2115 auto AI = FnArgs[FirstIRArg + i];
2116 AI->setName(Arg->getName() + "." + Twine(i));
2117 }
2118 break;
2119 }
2121 case ABIArgInfo::Ignore:
2122 assert(NumIRArgs == 0);
2123 // Initialize the local variable appropriately.
2124 if (!hasScalarEvaluationKind(Ty)) {
2125 ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
2126 } else {
2127 llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
2128 ArgVals.push_back(ParamValue::forDirect(U));
2129 }
2130 break;
2131 }
2132 }
2134 if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2135 for (int I = Args.size() - 1; I >= 0; --I)
2136 EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2137 } else {
2138 for (unsigned I = 0, E = Args.size(); I != E; ++I)
2139 EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2140 }
2141 }
2143 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
2144 while (insn->use_empty()) {
2145 llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
2146 if (!bitcast) return;
2148 // This is "safe" because we would have used a ConstantExpr otherwise.
2149 insn = cast<llvm::Instruction>(bitcast->getOperand(0));
2150 bitcast->eraseFromParent();
2151 }
2152 }
2154 /// Try to emit a fused autorelease of a return result.
2155 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
2156 llvm::Value *result) {
2157 // We must be immediately followed by the cast.
2158 llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
2159 if (BB->empty()) return nullptr;
2160 if (&BB->back() != result) return nullptr;
2162 llvm::Type *resultType = result->getType();
2164 // result is in a BasicBlock and is therefore an Instruction.
2165 llvm::Instruction *generator = cast<llvm::Instruction>(result);
2167 SmallVector<llvm::Instruction*,4> insnsToKill;
2169 // Look for:
2170 // %generator = bitcast %type1* %generator2 to %type2*
2171 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
2172 // We would have emitted this as a constant if the operand weren't
2173 // an Instruction.
2174 generator = cast<llvm::Instruction>(bitcast->getOperand(0));
2176 // Require the generator to be immediately followed by the cast.
2177 if (generator->getNextNode() != bitcast)
2178 return nullptr;
2180 insnsToKill.push_back(bitcast);
2181 }
2183 // Look for:
2184 // %generator = call i8* @objc_retain(i8* %originalResult)
2185 // or
2186 // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
2187 llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
2188 if (!call) return nullptr;
2190 bool doRetainAutorelease;
2192 if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints().objc_retain) {
2193 doRetainAutorelease = true;
2194 } else if (call->getCalledValue() == CGF.CGM.getObjCEntrypoints()
2195 .objc_retainAutoreleasedReturnValue) {
2196 doRetainAutorelease = false;
2198 // If we emitted an assembly marker for this call (and the
2199 // ARCEntrypoints field should have been set if so), go looking
2200 // for that call. If we can't find it, we can't do this
2201 // optimization. But it should always be the immediately previous
2202 // instruction, unless we needed bitcasts around the call.
2203 if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) {
2204 llvm::Instruction *prev = call->getPrevNode();
2205 assert(prev);
2206 if (isa<llvm::BitCastInst>(prev)) {
2207 prev = prev->getPrevNode();
2208 assert(prev);
2209 }
2210 assert(isa<llvm::CallInst>(prev));
2211 assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
2212 CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
2213 insnsToKill.push_back(prev);
2214 }
2215 } else {
2216 return nullptr;
2217 }
2219 result = call->getArgOperand(0);
2220 insnsToKill.push_back(call);
2222 // Keep killing bitcasts, for sanity. Note that we no longer care
2223 // about precise ordering as long as there's exactly one use.
2224 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
2225 if (!bitcast->hasOneUse()) break;
2226 insnsToKill.push_back(bitcast);
2227 result = bitcast->getOperand(0);
2228 }
2230 // Delete all the unnecessary instructions, from latest to earliest.
2231 for (SmallVectorImpl<llvm::Instruction*>::iterator
2232 i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i)
2233 (*i)->eraseFromParent();
2235 // Do the fused retain/autorelease if we were asked to.
2236 if (doRetainAutorelease)
2237 result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
2239 // Cast back to the result type.
2240 return CGF.Builder.CreateBitCast(result, resultType);
2241 }
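// In IR terms, this peels off a trailing
//   %1 = call i8* @objc_retainAutoreleasedReturnValue(i8* %0)
// (or @objc_retain) that feeds the return, so the retain/autorelease pair
// can be fused or dropped instead of round-tripping the autorelease pool.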
2243 /// If this is a +1 of the value of an immutable 'self', remove it.
2244 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
2245 llvm::Value *result) {
2246 // This is only applicable to a method with an immutable 'self'.
2247 const ObjCMethodDecl *method =
2248 dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
2249 if (!method) return nullptr;
2250 const VarDecl *self = method->getSelfDecl();
2251 if (!self->getType().isConstQualified()) return nullptr;
2253 // Look for a retain call.
2254 llvm::CallInst *retainCall =
2255 dyn_cast<llvm::CallInst>(result->stripPointerCasts());
2256 if (!retainCall ||
2257 retainCall->getCalledValue() != CGF.CGM.getObjCEntrypoints().objc_retain)
2258 return nullptr;
2260 // Look for an ordinary load of 'self'.
2261 llvm::Value *retainedValue = retainCall->getArgOperand(0);
2262 llvm::LoadInst *load =
2263 dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
2264 if (!load || load->isAtomic() || load->isVolatile() ||
2265 load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer())
2266 return nullptr;
2268 // Okay! Burn it all down. This relies for correctness on the
2269 // assumption that the retain is emitted as part of the return and
2270 // that thereafter everything is used "linearly".
2271 llvm::Type *resultType = result->getType();
2272 eraseUnusedBitCasts(cast<llvm::Instruction>(result));
2273 assert(retainCall->use_empty());
2274 retainCall->eraseFromParent();
2275 eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
2277 return CGF.Builder.CreateBitCast(load, resultType);
2278 }
2280 /// Emit an ARC autorelease of the result of a function.
2281 ///
2282 /// \return the value to actually return from the function
2283 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
2284 llvm::Value *result) {
2285 // If we're returning 'self', kill the initial retain. This is a
2286 // heuristic attempt to "encourage correctness" in the really unfortunate
2287 // case where we have a return of self during a dealloc and we desperately
2288 // need to avoid the possible autorelease.
2289 if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
2290 return self;
2292 // At -O0, try to emit a fused retain/autorelease.
2293 if (CGF.shouldUseFusedARCCalls())
2294 if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
2295 return fused;
2297 return CGF.EmitARCAutoreleaseReturnValue(result);
2298 }
2300 /// Heuristically search for a dominating store to the return-value slot.
2301 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
2302 // Check if a User is a store whose pointerOperand is the ReturnValue.
2303 // We are looking for stores to the ReturnValue, not for stores of the
2304 // ReturnValue to some other location.
2305 auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
2306 auto *SI = dyn_cast<llvm::StoreInst>(U);
2307 if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer())
2308 return nullptr;
2309 // These aren't actually possible for non-coerced returns, and we
2310 // only care about non-coerced returns on this code path.
2311 assert(!SI->isAtomic() && !SI->isVolatile());
2312 return SI;
2313 };
2314 // If there are multiple uses of the return-value slot, just check
2315 // for something immediately preceding the IP. Sometimes this can
2316 // happen with how we generate implicit-returns; it can also happen
2317 // with noreturn cleanups.
2318 if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
2319 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2320 if (IP->empty()) return nullptr;
2321 llvm::Instruction *I = &IP->back();
2323 // Skip lifetime markers
2324 for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
2325 IE = IP->rend();
2326 II != IE; ++II) {
2327 if (llvm::IntrinsicInst *Intrinsic =
2328 dyn_cast<llvm::IntrinsicInst>(&*II)) {
2329 if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
2330 const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
2331 ++II;
2332 if (II == IE)
2333 break;
2334 if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
2335 continue;
2336 }
2337 }
2339 I = &*II;
2340 break;
2341 }
2342 return GetStoreIfValid(I);
2343 }
2345 llvm::StoreInst *store =
2346 GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
2347 if (!store) return nullptr;
2349 // Now do a first-and-dirty dominance check: just walk up the
2350 // single-predecessors chain from the current insertion point.
2351 llvm::BasicBlock *StoreBB = store->getParent();
2352 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2353 while (IP != StoreBB) {
2354 if (!(IP = IP->getSinglePredecessor()))
2355 return nullptr;
2356 }
2358 // Okay, the store's basic block dominates the insertion point; we
2359 // can do our thing.
2360 return store;
2361 }
2363 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
2364 bool EmitRetDbgLoc,
2365 SourceLocation EndLoc) {
2366 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
2367 // Naked functions don't have epilogues.
2368 Builder.CreateUnreachable();
2369 return;
2370 }
2372 // Functions with no result always return void.
2373 if (!ReturnValue.isValid()) {
2374 Builder.CreateRetVoid();
2375 return;
2376 }
2378 llvm::DebugLoc RetDbgLoc;
2379 llvm::Value *RV = nullptr;
2380 QualType RetTy = FI.getReturnType();
2381 const ABIArgInfo &RetAI = FI.getReturnInfo();
2383 switch (RetAI.getKind()) {
2384 case ABIArgInfo::InAlloca:
2385 // Aggregates get evaluated directly into the destination. Sometimes we
2386 // need to return the sret value in a register, though.
2387 assert(hasAggregateEvaluationKind(RetTy));
2388 if (RetAI.getInAllocaSRet()) {
2389 llvm::Function::arg_iterator EI = CurFn->arg_end();
2390 --EI;
2391 llvm::Value *ArgStruct = &*EI;
2392 llvm::Value *SRet = Builder.CreateStructGEP(
2393 nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
2394 RV = Builder.CreateAlignedLoad(SRet, getPointerAlign(), "sret");
2395 }
2396 break;
2398 case ABIArgInfo::Indirect: {
2399 auto AI = CurFn->arg_begin();
2400 if (RetAI.isSRetAfterThis())
2401 ++AI;
2402 switch (getEvaluationKind(RetTy)) {
2403 case TEK_Complex: {
2404 ComplexPairTy RT =
2405 EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
2406 EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
2407 /*isInit*/ true);
2408 break;
2409 }
2410 case TEK_Aggregate:
2411 // Do nothing; aggregates get evaluated directly into the destination.
2412 break;
2413 case TEK_Scalar:
2414 EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
2415 MakeNaturalAlignAddrLValue(&*AI, RetTy),
2416 /*isInit*/ true);
2417 break;
2418 }
2419 break;
2420 }
2422 case ABIArgInfo::Extend:
2423 case ABIArgInfo::Direct:
2424 if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
2425 RetAI.getDirectOffset() == 0) {
2426 // The internal return value temp always will have pointer-to-return-type
2427 // type, just do a load.
2429 // If there is a dominating store to ReturnValue, we can elide
2430 // the load, zap the store, and usually zap the alloca.
2431 if (llvm::StoreInst *SI =
2432 findDominatingStoreToReturnValue(*this)) {
2433 // Reuse the debug location from the store unless there is
2434 // cleanup code to be emitted between the store and return
2435 // statement.
2436 if (EmitRetDbgLoc && !AutoreleaseResult)
2437 RetDbgLoc = SI->getDebugLoc();
2438 // Get the stored value and nuke the now-dead store.
2439 RV = SI->getValueOperand();
2440 SI->eraseFromParent();
2442 // If that was the only use of the return value, nuke it as well now.
2443 auto returnValueInst = ReturnValue.getPointer();
2444 if (returnValueInst->use_empty()) {
2445 if (auto alloca = dyn_cast<llvm::AllocaInst>(returnValueInst)) {
2446 alloca->eraseFromParent();
2447 ReturnValue = Address::invalid();
2448 }
2449 }
2451 // Otherwise, we have to do a simple load.
2452 } else {
2453 RV = Builder.CreateLoad(ReturnValue);
2454 }
2455 } else {
2456 // If the value is offset in memory, apply the offset now.
2457 Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
2459 RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
2460 }
2462 // In ARC, end functions that return a retainable type with a call
2463 // to objc_autoreleaseReturnValue.
2464 if (AutoreleaseResult) {
2465 assert(getLangOpts().ObjCAutoRefCount &&
2466 !FI.isReturnsRetained() &&
2467 RetTy->isObjCRetainableType());
2468 RV = emitAutoreleaseOfResult(*this, RV);
2469 }
2471 break;
2473 case ABIArgInfo::Ignore:
2474 break;
2476 case ABIArgInfo::Expand:
2477 llvm_unreachable("Invalid ABI kind for return argument");
2478 }
2480 llvm::Instruction *Ret;
2481 if (RV) {
2482 if (CurCodeDecl && SanOpts.has(SanitizerKind::ReturnsNonnullAttribute)) {
2483 if (auto RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>()) {
2484 SanitizerScope SanScope(this);
2485 llvm::Value *Cond = Builder.CreateICmpNE(
2486 RV, llvm::Constant::getNullValue(RV->getType()));
2487 llvm::Constant *StaticData[] = {
2488 EmitCheckSourceLocation(EndLoc),
2489 EmitCheckSourceLocation(RetNNAttr->getLocation()),
2490 };
2491 EmitCheck(std::make_pair(Cond, SanitizerKind::ReturnsNonnullAttribute),
2492 "nonnull_return", StaticData, None);
2493 }
2494 }
2495 Ret = Builder.CreateRet(RV);
2496 } else {
2497 Ret = Builder.CreateRetVoid();
2498 }
2500 if (RetDbgLoc)
2501 Ret->setDebugLoc(std::move(RetDbgLoc));
2502 }
2504 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
2505 const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
2506 return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
2507 }
2509 static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
2510 QualType Ty) {
2511 // FIXME: Generate IR in one pass, rather than going back and fixing up these
2512 // placeholders.
2513 llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
2514 llvm::Value *Placeholder =
2515 llvm::UndefValue::get(IRTy->getPointerTo()->getPointerTo());
2516 Placeholder = CGF.Builder.CreateDefaultAlignedLoad(Placeholder);
2518 // FIXME: When we generate this IR in one pass, we shouldn't need
2519 // this win32-specific alignment hack.
2520 CharUnits Align = CharUnits::fromQuantity(4);
2522 return AggValueSlot::forAddr(Address(Placeholder, Align),
2523 Ty.getQualifiers(),
2524 AggValueSlot::IsNotDestructed,
2525 AggValueSlot::DoesNotNeedGCBarriers,
2526 AggValueSlot::IsNotAliased);
2527 }
2529 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
2530 const VarDecl *param,
2531 SourceLocation loc) {
2532 // StartFunction converted the ABI-lowered parameter(s) into a
2533 // local alloca. We need to turn that into an r-value suitable
2534 // for EmitCall.
2535 Address local = GetAddrOfLocalVar(param);
2537 QualType type = param->getType();
2539 // For the most part, we just need to load the alloca, except:
2540 // 1) aggregate r-values are actually pointers to temporaries, and
2541 // 2) references to non-scalars are pointers directly to the aggregate.
2542 // I don't know why references to scalars are different here.
2543 if (const ReferenceType *ref = type->getAs<ReferenceType>()) {
2544 if (!hasScalarEvaluationKind(ref->getPointeeType()))
2545 return args.add(RValue::getAggregate(local), type);
2547 // Locals which are references to scalars are represented
2548 // with allocas holding the pointer.
2549 return args.add(RValue::get(Builder.CreateLoad(local)), type);
2550 }
2552 assert(!isInAllocaArgument(CGM.getCXXABI(), type) &&
2553 "cannot emit delegate call arguments for inalloca arguments!");
2555 args.add(convertTempToRValue(local, type, loc), type);
2556 }
2558 static bool isProvablyNull(llvm::Value *addr) {
2559 return isa<llvm::ConstantPointerNull>(addr);
2560 }
2562 static bool isProvablyNonNull(llvm::Value *addr) {
2563 return isa<llvm::AllocaInst>(addr);
2564 }
2566 /// Emit the actual writing-back of a writeback.
2567 static void emitWriteback(CodeGenFunction &CGF,
2568 const CallArgList::Writeback &writeback) {
2569 const LValue &srcLV = writeback.Source;
2570 Address srcAddr = srcLV.getAddress();
2571 assert(!isProvablyNull(srcAddr.getPointer()) &&
2572 "shouldn't have writeback for provably null argument");
2574 llvm::BasicBlock *contBB = nullptr;
2576 // If the argument wasn't provably non-null, we need to null check
2577 // before doing the store.
2578 bool provablyNonNull = isProvablyNonNull(srcAddr.getPointer());
2579 if (!provablyNonNull) {
2580 llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
2581 contBB = CGF.createBasicBlock("icr.done");
2583 llvm::Value *isNull =
2584 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
2585 CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
2586 CGF.EmitBlock(writebackBB);
2587 }
2589 // Load the value to writeback.
2590 llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
2592 // Cast it back, in case we're writing an id to a Foo* or something.
2593 value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(),
2594 "icr.writeback-cast");
2596 // Perform the writeback.
2598 // If we have a "to use" value, it's something we need to emit a use
2599 // of. This has to be carefully threaded in: if it's done after the
2600 // release it's potentially undefined behavior (and the optimizer
2601 // will ignore it), and if it happens before the retain then the
2602 // optimizer could move the release there.
2603 if (writeback.ToUse) {
2604 assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
2606 // Retain the new value. No need to block-copy here: the block's
2607 // being passed up the stack.
2608 value = CGF.EmitARCRetainNonBlock(value);
2610 // Emit the intrinsic use here.
2611 CGF.EmitARCIntrinsicUse(writeback.ToUse);
2613 // Load the old value (primitively).
2614 llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());
2616 // Put the new value in place (primitively).
2617 CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
2619 // Release the old value.
2620 CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
2622 // Otherwise, we can just do a normal lvalue store.
2623 } else {
2624 CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
2625 }
2627 // Jump to the continuation block.
2628 if (!provablyNonNull)
2629 CGF.EmitBlock(contBB);
2630 }
2632 static void emitWritebacks(CodeGenFunction &CGF,
2633 const CallArgList &args) {
2634 for (const auto &I : args.writebacks())
2635 emitWriteback(CGF, I);
2636 }
2638 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
2639 const CallArgList &CallArgs) {
2640 assert(CGF.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee());
2641 ArrayRef<CallArgList::CallArgCleanup> Cleanups =
2642 CallArgs.getCleanupsToDeactivate();
2643 // Iterate in reverse to increase the likelihood of popping the cleanup.
2644 for (const auto &I : llvm::reverse(Cleanups)) {
2645 CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP);
2646 I.IsActiveIP->eraseFromParent();
2647 }
2648 }
2650 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
2651 if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
2652 if (uop->getOpcode() == UO_AddrOf)
2653 return uop->getSubExpr();
2654 return nullptr;
2655 }
2657 /// Emit an argument that's being passed call-by-writeback. That is,
2658 /// we are passing the address of an __autoreleased temporary; it
2659 /// might be copy-initialized with the current value of the given
2660 /// address, but it will definitely be copied out of after the call.
2661 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
2662 const ObjCIndirectCopyRestoreExpr *CRE) {
2663 LValue srcLV;
2665 // Make an optimistic effort to emit the address as an l-value.
2666 // This can fail if the argument expression is more complicated.
2667 if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
2668 srcLV = CGF.EmitLValue(lvExpr);
2670 // Otherwise, just emit it as a scalar.
2671 } else {
2672 Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());
2674 QualType srcAddrType =
2675 CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
2676 srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
2677 }
2678 Address srcAddr = srcLV.getAddress();
2680 // The dest and src types don't necessarily match in LLVM terms
2681 // because of the crazy ObjC compatibility rules.
2683 llvm::PointerType *destType =
2684 cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
2686 // If the address is a constant null, just pass the appropriate null.
2687 if (isProvablyNull(srcAddr.getPointer())) {
2688 args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
2689 CRE->getType());
2690 return;
2691 }
2693 // Create the temporary.
2694 Address temp = CGF.CreateTempAlloca(destType->getElementType(),
2695 CGF.getPointerAlign(),
2696 "icr.temp");
2697 // Loading an l-value can introduce a cleanup if the l-value is __weak,
2698 // and that cleanup will be conditional if we can't prove that the l-value
2699 // isn't null, so we need to register a dominating point so that the cleanups
2700 // system will make valid IR.
2701 CodeGenFunction::ConditionalEvaluation condEval(CGF);
2703 // Zero-initialize it if we're not doing a copy-initialization.
2704 bool shouldCopy = CRE->shouldCopy();
2705 if (!shouldCopy) {
2706 llvm::Value *null =
2707 llvm::ConstantPointerNull::get(
2708 cast<llvm::PointerType>(destType->getElementType()));
2709 CGF.Builder.CreateStore(null, temp);
2710 }
2712 llvm::BasicBlock *contBB = nullptr;
2713 llvm::BasicBlock *originBB = nullptr;
2715 // If the address is *not* known to be non-null, we need to switch.
2716 llvm::Value *finalArgument;
2718 bool provablyNonNull = isProvablyNonNull(srcAddr.getPointer());
2719 if (provablyNonNull) {
2720 finalArgument = temp.getPointer();
2721 } else {
2722 llvm::Value *isNull =
2723 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
2725 finalArgument = CGF.Builder.CreateSelect(isNull,
2726 llvm::ConstantPointerNull::get(destType),
2727 temp.getPointer(), "icr.argument");
2729 // If we need to copy, then the load has to be conditional, which
2730 // means we need control flow.
2731 if (shouldCopy) {
2732 originBB = CGF.Builder.GetInsertBlock();
2733 contBB = CGF.createBasicBlock("icr.cont");
2734 llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
2735 CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
2736 CGF.EmitBlock(copyBB);
2737 condEval.begin(CGF);
2738 }
2739 }
2741 llvm::Value *valueToUse = nullptr;
2743 // Perform a copy if necessary.
2744 if (shouldCopy) {
2745 RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
2746 assert(srcRV.isScalar());
2748 llvm::Value *src = srcRV.getScalarVal();
2749 src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
2750 "icr.cast");
2752 // Use an ordinary store, not a store-to-lvalue.
2753 CGF.Builder.CreateStore(src, temp);
2755 // If optimization is enabled, and the value was held in a
2756 // __strong variable, we need to tell the optimizer that this
2757 // value has to stay alive until we're doing the store back.
2758 // This is because the temporary is effectively unretained,
2759 // and so otherwise we can violate the high-level semantics.
2760 if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
2761 srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
2762 valueToUse = src;
2763 }
2764 }
2766 // Finish the control flow if we needed it.
2767 if (shouldCopy && !provablyNonNull) {
2768 llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
2769 CGF.EmitBlock(contBB);
2771 // Make a phi for the value to intrinsically use.
2772 if (valueToUse) {
2773 llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
2774 "icr.to-use");
2775 phiToUse->addIncoming(valueToUse, copyBB);
2776 phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
2777 originBB);
2778 valueToUse = phiToUse;
2779 }
2781 condEval.end(CGF);
2782 }
2784 args.addWriteback(srcLV, temp, valueToUse);
2785 args.add(RValue::get(finalArgument), CRE->getType());
2786 }
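// Net effect for a call like 'f(&x)' with an __autoreleasing out-parameter:
// we pass &temp (or null when the source address is provably null), and
// emitWriteback above copies temp back into the source l-value afterwards.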
2788 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
2789 assert(!StackBase && !StackCleanup.isValid());
2791 // Save the stack.
2792 llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
2793 StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save");
2794 }
2796 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
2797 if (StackBase) {
2798 // Restore the stack after the call.
2799 llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
2800 CGF.Builder.CreateCall(F, StackBase);
2801 }
2802 }
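// Together with allocateArgumentMemory, this brackets the call roughly as:
//   %inalloca.save = call i8* @llvm.stacksave()
//   ... set up and make the inalloca call ...
//   call void @llvm.stackrestore(i8* %inalloca.save)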
2804 void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
2805 SourceLocation ArgLoc,
2806 const FunctionDecl *FD,
2807 unsigned ParmNum) {
2808 if (!SanOpts.has(SanitizerKind::NonnullAttribute) || !FD)
2809 return;
2810 auto PVD = ParmNum < FD->getNumParams() ? FD->getParamDecl(ParmNum) : nullptr;
2811 unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
2812 auto NNAttr = getNonNullAttr(FD, PVD, ArgType, ArgNo);
2813 if (!NNAttr)
2814 return;
2815 SanitizerScope SanScope(this);
2816 assert(RV.isScalar());
2817 llvm::Value *V = RV.getScalarVal();
2818 llvm::Value *Cond =
2819 Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
2820 llvm::Constant *StaticData[] = {
2821 EmitCheckSourceLocation(ArgLoc),
2822 EmitCheckSourceLocation(NNAttr->getLocation()),
2823 llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
2824 };
2825 EmitCheck(std::make_pair(Cond, SanitizerKind::NonnullAttribute),
2826 "nonnull_arg", StaticData, None);
2827 }
2829 void CodeGenFunction::EmitCallArgs(
2830 CallArgList &Args, ArrayRef<QualType> ArgTypes,
2831 llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
2832 const FunctionDecl *CalleeDecl, unsigned ParamsToSkip) {
2833 assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
2835 auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg) {
2836 if (CalleeDecl == nullptr || I >= CalleeDecl->getNumParams())
2837 return;
2838 auto *PS = CalleeDecl->getParamDecl(I)->getAttr<PassObjectSizeAttr>();
2839 if (PS == nullptr)
2840 return;
2842 const auto &Context = getContext();
2843 auto SizeTy = Context.getSizeType();
2844 auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
2845 llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T);
2846 Args.add(RValue::get(V), SizeTy);
2847 };
2849 // We *have* to evaluate arguments from right to left in the MS C++ ABI,
2850 // because arguments are destroyed left to right in the callee.
2851 if (CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2852 // Insert a stack save if we're going to need any inalloca args.
2853 bool HasInAllocaArgs = false;
2854 for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end();
2855 I != E && !HasInAllocaArgs; ++I)
2856 HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I);
2857 if (HasInAllocaArgs) {
2858 assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
2859 Args.allocateArgumentMemory(*this);
2860 }
2862 // Evaluate each argument.
2863 size_t CallArgsStart = Args.size();
2864 for (int I = ArgTypes.size() - 1; I >= 0; --I) {
2865 CallExpr::const_arg_iterator Arg = ArgRange.begin() + I;
2866 EmitCallArg(Args, *Arg, ArgTypes[I]);
2867 EmitNonNullArgCheck(Args.back().RV, ArgTypes[I], (*Arg)->getExprLoc(),
2868 CalleeDecl, ParamsToSkip + I);
2869 MaybeEmitImplicitObjectSize(I, *Arg);
2870 }
2872 // Un-reverse the arguments we just evaluated so they match up with the LLVM
2873 // IR function.
2874 std::reverse(Args.begin() + CallArgsStart, Args.end());
2875 return;
2876 }
2878 for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
2879 CallExpr::const_arg_iterator Arg = ArgRange.begin() + I;
2880 assert(Arg != ArgRange.end());
2881 EmitCallArg(Args, *Arg, ArgTypes[I]);
2882 EmitNonNullArgCheck(Args.back().RV, ArgTypes[I], (*Arg)->getExprLoc(),
2883 CalleeDecl, ParamsToSkip + I);
2884 MaybeEmitImplicitObjectSize(I, *Arg);
2885 }
2886 }
2888 namespace {
2890 struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
2891 DestroyUnpassedArg(Address Addr, QualType Ty)
2892 : Addr(Addr), Ty(Ty) {}
2894 Address Addr;
2895 QualType Ty;
2897 void Emit(CodeGenFunction &CGF, Flags flags) override {
2898 const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
2899 assert(!Dtor->isTrivial());
2900 CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
2901 /*Delegating=*/false, Addr);
2902 }
2903 };
2905 struct DisableDebugLocationUpdates {
2906 CodeGenFunction &CGF;
2907 bool disabledDebugInfo;
2908 DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
2909 if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
2910 CGF.disableDebugInfo();
2911 }
2912 ~DisableDebugLocationUpdates() {
2913 if (disabledDebugInfo)
2914 CGF.enableDebugInfo();
2915 }
2916 };
2918 } // end anonymous namespace
2920 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
2921 QualType type) {
2922 DisableDebugLocationUpdates Dis(*this, E);
2923 if (const ObjCIndirectCopyRestoreExpr *CRE
2924 = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
2925 assert(getLangOpts().ObjCAutoRefCount);
2926 assert(getContext().hasSameType(E->getType(), type));
2927 return emitWritebackArg(*this, args, CRE);
2928 }
2930 assert(type->isReferenceType() == E->isGLValue() &&
2931 "reference binding to unmaterialized r-value!");
2933 if (E->isGLValue()) {
2934 assert(E->getObjectKind() == OK_Ordinary);
2935 return args.add(EmitReferenceBindingToExpr(E), type);
2938 bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);
2940 // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
2941 // However, we still have to push an EH-only cleanup in case we unwind before
2942 // we make it to the call.
2943 if (HasAggregateEvalKind &&
2944 CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2945 // If we're using inalloca, use the argument memory. Otherwise, use a
2946 // temporary.
2947 AggValueSlot Slot;
2948 if (args.isUsingInAlloca())
2949 Slot = createPlaceholderSlot(*this, type);
2950 else
2951 Slot = CreateAggTemp(type, "agg.tmp");
2953 const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
2954 bool DestroyedInCallee =
2955 RD && RD->hasNonTrivialDestructor() &&
2956 CGM.getCXXABI().getRecordArgABI(RD) != CGCXXABI::RAA_Default;
2957 if (DestroyedInCallee)
2958 Slot.setExternallyDestructed();
2960 EmitAggExpr(E, Slot);
2961 RValue RV = Slot.asRValue();
2962 args.add(RV, type);
2964 if (DestroyedInCallee) {
2965 // Create a no-op GEP between the placeholder and the cleanup so we can
2966 // RAUW it successfully. It also serves as a marker of the first
2967 // instruction where the cleanup is active.
2968 pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(),
2969 type);
2970 // This unreachable is a temporary marker which will be removed later.
2971 llvm::Instruction *IsActive = Builder.CreateUnreachable();
2972 args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
2973 }
2975 return;
2976 }
2977 if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
2978 cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
2979 LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
2980 assert(L.isSimple());
2981 if (L.getAlignment() >= getContext().getTypeAlignInChars(type)) {
2982 args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true);
2983 } else {
2984 // We can't represent a misaligned lvalue in the CallArgList, so copy
2985 // to an aligned temporary now.
2986 Address tmp = CreateMemTemp(type);
2987 EmitAggregateCopy(tmp, L.getAddress(), type, L.isVolatile());
2988 args.add(RValue::getAggregate(tmp), type);
2989 }
2990 return;
2991 }
2993 args.add(EmitAnyExprToTemp(E), type);
2994 }
2996 QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
2997 // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
2998 // implicitly widens null pointer constants that are arguments to varargs
2999 // functions to pointer-sized ints.
3000 if (!getTarget().getTriple().isOSWindows())
3001 return Arg->getType();
3003 if (Arg->getType()->isIntegerType() &&
3004 getContext().getTypeSize(Arg->getType()) <
3005 getContext().getTargetInfo().getPointerWidth(0) &&
3006 Arg->isNullPointerConstant(getContext(),
3007 Expr::NPC_ValueDependentIsNotNull)) {
3008 return getContext().getIntPtrType();
3009 }
3011 return Arg->getType();
3012 }
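// e.g. on Win64, 'printf("%p", NULL)' would otherwise pass a 32-bit 0;
// treating the argument as intptr_t widens it to a full pointer-sized
// slot, which is what the callee's va_arg walk expects.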
3014 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3015 // optimizer it can aggressively ignore unwind edges.
3016 void
3017 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
3018 if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3019 !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
3020 Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
3021 CGM.getNoObjCARCExceptionsMetadata());
3022 }
3024 /// Emits a call to the given no-arguments nounwind runtime function.
3025 llvm::CallInst *
3026 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
3027 const llvm::Twine &name) {
3028 return EmitNounwindRuntimeCall(callee, None, name);
3029 }
3031 /// Emits a call to the given nounwind runtime function.
3032 llvm::CallInst *
3033 CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
3034 ArrayRef<llvm::Value*> args,
3035 const llvm::Twine &name) {
3036 llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
3037 call->setDoesNotThrow();
3038 return call;
3039 }
3041 /// Emits a simple call (never an invoke) to the given no-arguments
3042 /// runtime function.
3043 llvm::CallInst *
3044 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
3045 const llvm::Twine &name) {
3046 return EmitRuntimeCall(callee, None, name);
3047 }
3049 /// Emits a simple call (never an invoke) to the given runtime
3050 /// function.
3051 llvm::CallInst *
3052 CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
3053 ArrayRef<llvm::Value*> args,
3054 const llvm::Twine &name) {
3055 llvm::CallInst *call = Builder.CreateCall(callee, args, name);
3056 call->setCallingConv(getRuntimeCC());
3057 return call;
3058 }
3060 // Calls which may throw must have operand bundles indicating which funclet
3061 // they are nested within.
3062 static void
3063 getBundlesForFunclet(llvm::Value *Callee,
3064 llvm::Instruction *CurrentFuncletPad,
3065 SmallVectorImpl<llvm::OperandBundleDef> &BundleList) {
3066 // There is no need for a funclet operand bundle if we aren't inside a funclet.
3067 if (!CurrentFuncletPad)
3068 return;
3070 // Skip intrinsics which cannot throw.
3071 auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts());
3072 if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow())
3073 return;
3075 BundleList.emplace_back("funclet", CurrentFuncletPad);
3076 }
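// e.g. a runtime call emitted inside a catch funclet looks roughly like:
//   %call = call i32 @foo() [ "funclet"(token %catchpad) ]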
3078 /// Emits a call or invoke to the given noreturn runtime function.
3079 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
3080 ArrayRef<llvm::Value*> args) {
3081 SmallVector<llvm::OperandBundleDef, 1> BundleList;
3082 getBundlesForFunclet(callee, CurrentFuncletPad, BundleList);
3084 if (getInvokeDest()) {
3085 llvm::InvokeInst *invoke =
3086 Builder.CreateInvoke(callee,
3087 getUnreachableBlock(),
3088 getInvokeDest(),
3089 args,
3090 BundleList);
3091 invoke->setDoesNotReturn();
3092 invoke->setCallingConv(getRuntimeCC());
3093 } else {
3094 llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
3095 call->setDoesNotReturn();
3096 call->setCallingConv(getRuntimeCC());
3097 Builder.CreateUnreachable();
3098 }
3099 }
3101 /// Emits a call or invoke instruction to the given nullary runtime
3102 /// function.
3103 llvm::CallSite
3104 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
3105 const Twine &name) {
3106 return EmitRuntimeCallOrInvoke(callee, None, name);
3107 }
3109 /// Emits a call or invoke instruction to the given runtime function.
3110 llvm::CallSite
3111 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
3112 ArrayRef<llvm::Value*> args,
3113 const Twine &name) {
3114 llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name);
3115 callSite.setCallingConv(getRuntimeCC());
3116 return callSite;
3117 }
3119 /// Emits a call or invoke instruction to the given function, depending
3120 /// on the current state of the EH stack.
3121 llvm::CallSite
3122 CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
3123 ArrayRef<llvm::Value *> Args,
3124 const Twine &Name) {
3125 llvm::BasicBlock *InvokeDest = getInvokeDest();
3127 llvm::Instruction *Inst;
3128 if (!InvokeDest)
3129 Inst = Builder.CreateCall(Callee, Args, Name);
3130 else {
3131 llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
3132 Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, Name);
3133 EmitBlock(ContBB);
3134 }
3136 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3137 // optimizer it can aggressively ignore unwind edges.
3138 if (CGM.getLangOpts().ObjCAutoRefCount)
3139 AddObjCARCExceptionMetadata(Inst);
3141 return llvm::CallSite(Inst);
3142 }
3144 /// \brief Store a non-aggregate value to an address to initialize it. For
3145 /// initialization, a non-atomic store will be used.
3146 static void EmitInitStoreOfNonAggregate(CodeGenFunction &CGF, RValue Src,
3147 LValue Dst) {
3148 if (Src.isScalar())
3149 CGF.EmitStoreOfScalar(Src.getScalarVal(), Dst, /*init=*/true);
3150 else
3151 CGF.EmitStoreOfComplex(Src.getComplexVal(), Dst, /*init=*/true);
3152 }
3154 void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
3155 llvm::Value *New) {
3156 DeferredReplacements.push_back(std::make_pair(Old, New));
3157 }
3159 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
3160 llvm::Value *Callee,
3161 ReturnValueSlot ReturnValue,
3162 const CallArgList &CallArgs,
3163 CGCalleeInfo CalleeInfo,
3164 llvm::Instruction **callOrInvoke) {
3165 // FIXME: We no longer need the types from CallArgs; lift up and simplify.
3167 // Handle struct-return functions by passing a pointer to the
3168 // location that we would like to return into.
3169 QualType RetTy = CallInfo.getReturnType();
3170 const ABIArgInfo &RetAI = CallInfo.getReturnInfo();
3172 llvm::FunctionType *IRFuncTy =
3173 cast<llvm::FunctionType>(
3174 cast<llvm::PointerType>(Callee->getType())->getElementType());
3176 // If we're using inalloca, insert the allocation after the stack save.
3177 // FIXME: Do this earlier rather than hacking it in here!
3178 Address ArgMemory = Address::invalid();
3179 const llvm::StructLayout *ArgMemoryLayout = nullptr;
3180 if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
3181 ArgMemoryLayout = CGM.getDataLayout().getStructLayout(ArgStruct);
3182 llvm::Instruction *IP = CallArgs.getStackBase();
3183 llvm::AllocaInst *AI;
3184 if (IP) {
3185 IP = IP->getNextNode();
3186 AI = new llvm::AllocaInst(ArgStruct, "argmem", IP);
3187 } else {
3188 AI = CreateTempAlloca(ArgStruct, "argmem");
3189 }
3190 auto Align = CallInfo.getArgStructAlignment();
3191 AI->setAlignment(Align.getQuantity());
3192 AI->setUsedWithInAlloca(true);
3193 assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
3194 ArgMemory = Address(AI, Align);
3195 }
3197 // Helper function to drill into the inalloca allocation.
3198 auto createInAllocaStructGEP = [&](unsigned FieldIndex) -> Address {
3199 CharUnits FieldOffset =
3200 CharUnits::fromQuantity(ArgMemoryLayout->getElementOffset(FieldIndex));
3201 return Builder.CreateStructGEP(ArgMemory, FieldIndex, FieldOffset);
3202 };
3204 ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
3205 SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());
3207 // If the call returns a temporary with struct return, create a temporary
3208 // alloca to hold the result, unless one is given to us.
3209 Address SRetPtr = Address::invalid();
3210 size_t UnusedReturnSize = 0;
3211 if (RetAI.isIndirect() || RetAI.isInAlloca()) {
3212 if (!ReturnValue.isNull()) {
3213 SRetPtr = ReturnValue.getValue();
3214 } else {
3215 SRetPtr = CreateMemTemp(RetTy);
3216 if (HaveInsertPoint() && ReturnValue.isUnused()) {
3217 uint64_t size =
3218 CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
3219 if (EmitLifetimeStart(size, SRetPtr.getPointer()))
3220 UnusedReturnSize = size;
3221 }
3222 }
3223 if (IRFunctionArgs.hasSRetArg()) {
3224 IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
3225 } else if (RetAI.isInAlloca()) {
3226 Address Addr = createInAllocaStructGEP(RetAI.getInAllocaFieldIndex());
3227 Builder.CreateStore(SRetPtr.getPointer(), Addr);
3228 }
3229 }
3231 assert(CallInfo.arg_size() == CallArgs.size() &&
3232 "Mismatch between function signature & arguments.");
3233 unsigned ArgNo = 0;
3234 CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
3235 for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
3236 I != E; ++I, ++info_it, ++ArgNo) {
3237 const ABIArgInfo &ArgInfo = info_it->info;
3238 RValue RV = I->RV;
3240 // Insert a padding argument to ensure proper alignment.
3241 if (IRFunctionArgs.hasPaddingArg(ArgNo))
3242 IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
3243 llvm::UndefValue::get(ArgInfo.getPaddingType());
3245 unsigned FirstIRArg, NumIRArgs;
3246 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
    switch (ArgInfo.getKind()) {
    case ABIArgInfo::InAlloca: {
      assert(NumIRArgs == 0);
      assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
      if (RV.isAggregate()) {
        // Replace the placeholder with the appropriate argument slot GEP.
        llvm::Instruction *Placeholder =
            cast<llvm::Instruction>(RV.getAggregatePointer());
        CGBuilderTy::InsertPoint IP = Builder.saveIP();
        Builder.SetInsertPoint(Placeholder);
        Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
        Builder.restoreIP(IP);
        deferPlaceholderReplacement(Placeholder, Addr.getPointer());
      } else {
        // Store the RValue into the argument struct.
        Address Addr = createInAllocaStructGEP(ArgInfo.getInAllocaFieldIndex());
        unsigned AS = Addr.getType()->getPointerAddressSpace();
        llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
        // There are some cases where a trivial bitcast is not avoidable. The
        // definition of a type later in a translation unit may change its IR
        // type from {}* to (%struct.foo*)*.
        if (Addr.getType() != MemType)
          Addr = Builder.CreateBitCast(Addr, MemType);
        LValue argLV = MakeAddrLValue(Addr, I->Ty);
        EmitInitStoreOfNonAggregate(*this, RV, argLV);
      }
      break;
    }

    case ABIArgInfo::Indirect: {
      assert(NumIRArgs == 1);
      if (RV.isScalar() || RV.isComplex()) {
        // Make a temporary alloca to pass the argument.
        Address Addr = CreateMemTemp(I->Ty, ArgInfo.getIndirectAlign());
        IRCallArgs[FirstIRArg] = Addr.getPointer();

        LValue argLV = MakeAddrLValue(Addr, I->Ty);
        EmitInitStoreOfNonAggregate(*this, RV, argLV);
      } else {
        // We want to avoid creating an unnecessary temporary+copy here;
        // however, we need one in three cases:
        // 1. If the argument is not byval, and we are required to copy the
        //    source. (This case doesn't occur on any common architecture.)
        // 2. If the argument is byval, RV is not sufficiently aligned, and
        //    we cannot force it to be sufficiently aligned.
        // 3. If the argument is byval, but RV is located in an address space
        //    different than that of the argument (0).
        Address Addr = RV.getAggregateAddress();
        CharUnits Align = ArgInfo.getIndirectAlign();
        const llvm::DataLayout *TD = &CGM.getDataLayout();
        const unsigned RVAddrSpace = Addr.getType()->getAddressSpace();
        const unsigned ArgAddrSpace =
            (FirstIRArg < IRFuncTy->getNumParams()
                 ? IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace()
                 : 0);
        if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
            (ArgInfo.getIndirectByVal() && Addr.getAlignment() < Align &&
             llvm::getOrEnforceKnownAlignment(Addr.getPointer(),
                                              Align.getQuantity(), *TD)
                 < Align.getQuantity()) ||
            (ArgInfo.getIndirectByVal() && (RVAddrSpace != ArgAddrSpace))) {
          // Create an aligned temporary, and copy to it.
          Address AI = CreateMemTemp(I->Ty, ArgInfo.getIndirectAlign());
          IRCallArgs[FirstIRArg] = AI.getPointer();
          EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());
        } else {
          // Skip the extra memcpy call.
          IRCallArgs[FirstIRArg] = Addr.getPointer();
        }
      }
      break;
    }
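
    // Illustrative sketch (not from the original source): on a typical
    // 32-bit x86 target,
    //   void h(struct Big b);
    // lowers roughly to
    //   call void @h(%struct.Big* byval align 4 %b.addr)
    // where the temporary copy above is skipped whenever the existing
    // aggregate is already suitably aligned and in the right address space.
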
    case ABIArgInfo::Ignore:
      assert(NumIRArgs == 0);
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
          ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
          ArgInfo.getDirectOffset() == 0) {
        assert(NumIRArgs == 1);
        llvm::Value *V;
        if (RV.isScalar())
          V = RV.getScalarVal();
        else
          V = Builder.CreateLoad(RV.getAggregateAddress());

        // We might have to widen integers, but we should never truncate.
        if (ArgInfo.getCoerceToType() != V->getType() &&
            V->getType()->isIntegerTy())
          V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());

        // If the argument doesn't match, perform a bitcast to coerce it. This
        // can happen due to trivial type mismatches.
        if (FirstIRArg < IRFuncTy->getNumParams() &&
            V->getType() != IRFuncTy->getParamType(FirstIRArg))
          V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
        IRCallArgs[FirstIRArg] = V;
        break;
      }
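
      // Illustrative sketch (not from the original source): when the ABI
      // promotes a small integer, e.g. a C 'bool' whose coerced type is i8,
      // the widening above emits roughly
      //   %widened = zext i1 %v to i8
      // before the value is placed in the IR argument list.
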
      // FIXME: Avoid the conversion through memory if possible.
      Address Src = Address::invalid();
      if (RV.isScalar() || RV.isComplex()) {
        Src = CreateMemTemp(I->Ty, "coerce");
        LValue SrcLV = MakeAddrLValue(Src, I->Ty);
        EmitInitStoreOfNonAggregate(*this, RV, SrcLV);
      } else {
        Src = RV.getAggregateAddress();
      }

      // If the value is offset in memory, apply the offset now.
      Src = emitAddressAtOffset(*this, Src, ArgInfo);

      // Fast-isel and the optimizer generally like scalar values better than
      // FCAs, so we flatten them if this is safe to do for this argument.
      llvm::StructType *STy =
          dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
      if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
        llvm::Type *SrcTy = Src.getType()->getElementType();
        uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
        uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);

        // If the source type is smaller than the destination type of the
        // coerce-to logic, copy the source value into a temp alloca the size
        // of the destination type to allow loading all of it. The bits past
        // the source value are left undef.
        if (SrcSize < DstSize) {
          Address TempAlloca
            = CreateTempAlloca(STy, Src.getAlignment(),
                               Src.getName() + ".coerce");
          Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
          Src = TempAlloca;
        } else {
          Src = Builder.CreateBitCast(Src, llvm::PointerType::getUnqual(STy));
        }

        auto SrcLayout = CGM.getDataLayout().getStructLayout(STy);
        assert(NumIRArgs == STy->getNumElements());
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          auto Offset = CharUnits::fromQuantity(SrcLayout->getElementOffset(i));
          Address EltPtr = Builder.CreateStructGEP(Src, i, Offset);
          llvm::Value *LI = Builder.CreateLoad(EltPtr);
          IRCallArgs[FirstIRArg + i] = LI;
        }
      } else {
        // In the simple case, just pass the coerced loaded value.
        assert(NumIRArgs == 1);
        IRCallArgs[FirstIRArg] =
            CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);
      }

      break;
    }
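
    // Illustrative sketch (not from the original source): on x86-64, a
    //   struct Pair { double x; double y; };
    // argument coerced to { double, double } is flattened into two scalar
    // IR arguments rather than one first-class aggregate, roughly
    //   call void @use(double %pair.0, double %pair.1)
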
    case ABIArgInfo::Expand:
      unsigned IRArgPos = FirstIRArg;
      ExpandTypeToArgs(I->Ty, RV, IRFuncTy, IRCallArgs, IRArgPos);
      assert(IRArgPos == FirstIRArg + NumIRArgs);
      break;
    }
  }

  if (ArgMemory.isValid()) {
    llvm::Value *Arg = ArgMemory.getPointer();
    if (CallInfo.isVariadic()) {
      // When passing non-POD arguments by value to variadic functions, we will
      // end up with a variadic prototype and an inalloca call site. In such
      // cases, we can't do any parameter mismatch checks. Give up and bitcast
      // the callee.
      unsigned CalleeAS =
          cast<llvm::PointerType>(Callee->getType())->getAddressSpace();
      Callee = Builder.CreateBitCast(
          Callee, getTypes().GetFunctionType(CallInfo)->getPointerTo(CalleeAS));
    } else {
      llvm::Type *LastParamTy =
          IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
      if (Arg->getType() != LastParamTy) {
#ifndef NDEBUG
        // Assert that these structs have equivalent element types.
        llvm::StructType *FullTy = CallInfo.getArgStruct();
        llvm::StructType *DeclaredTy = cast<llvm::StructType>(
            cast<llvm::PointerType>(LastParamTy)->getElementType());
        assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
        for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
                                                DE = DeclaredTy->element_end(),
                                                FI = FullTy->element_begin();
             DI != DE; ++DI, ++FI)
          assert(*DI == *FI);
#endif
        Arg = Builder.CreateBitCast(Arg, LastParamTy);
      }
    }
    assert(IRFunctionArgs.hasInallocaArg());
    IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
  }
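
  // Illustrative sketch (not from the original source; names hypothetical):
  //   void printf_like(const char *fmt, ...);
  //   printf_like("%d", NonTrivial());   // non-POD by value, MSVC ABI
  // takes the variadic branch above, so the callee is bitcast to a function
  // type whose last parameter is the inalloca argument struct.
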
  if (!CallArgs.getCleanupsToDeactivate().empty())
    deactivateArgCleanupsBeforeCall(*this, CallArgs);

  // If the callee is a bitcast of a function to a varargs pointer to function
  // type, check to see if we can remove the bitcast. This handles some cases
  // with unprototyped functions.
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
    if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
      llvm::PointerType *CurPT = cast<llvm::PointerType>(Callee->getType());
      llvm::FunctionType *CurFT =
          cast<llvm::FunctionType>(CurPT->getElementType());
      llvm::FunctionType *ActualFT = CalleeF->getFunctionType();

      if (CE->getOpcode() == llvm::Instruction::BitCast &&
          ActualFT->getReturnType() == CurFT->getReturnType() &&
          ActualFT->getNumParams() == CurFT->getNumParams() &&
          ActualFT->getNumParams() == IRCallArgs.size() &&
          (CurFT->isVarArg() || !ActualFT->isVarArg())) {
        bool ArgsMatch = true;
        for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
          if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
            ArgsMatch = false;
            break;
          }

        // Strip the cast if we can get away with it. This is a nice cleanup,
        // but also allows us to inline the function at -O0 if it is marked
        // always_inline.
        if (ArgsMatch)
          Callee = CalleeF;
      }
    }
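
  // Illustrative sketch (not from the original source): in C89-style code,
  //   void f();          // unprototyped declaration
  //   f(1);              // defined elsewhere as void f(int)
  // the call site would otherwise go through a bitcast of @f; when the IR
  // argument types line up exactly, the cast is stripped and @f is called
  // directly.
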
  assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
  for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
    // Inalloca argument can have different type.
    if (IRFunctionArgs.hasInallocaArg() &&
        i == IRFunctionArgs.getInallocaArgNo())
      continue;
    if (i < IRFuncTy->getNumParams())
      assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
  }

  unsigned CallingConv;
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(Callee->getName(), CallInfo, CalleeInfo,
                             AttributeList, CallingConv,
                             /*AttrOnCallSite=*/true);
  llvm::AttributeSet Attrs = llvm::AttributeSet::get(getLLVMContext(),
                                                     AttributeList);

  bool CannotThrow;
  if (currentFunctionUsesSEHTry()) {
    // SEH cares about asynchronous exceptions, so everything can "throw."
    CannotThrow = false;
  } else if (isCleanupPadScope() &&
             EHPersonality::get(*this).isMSVCXXPersonality()) {
    // The MSVC++ personality will implicitly terminate the program if an
    // exception is thrown. An unwind edge cannot be reached.
    CannotThrow = true;
  } else {
    // Otherwise, nounwind call sites will never throw.
    CannotThrow = Attrs.hasAttribute(llvm::AttributeSet::FunctionIndex,
                                     llvm::Attribute::NoUnwind);
  }
  llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();

  SmallVector<llvm::OperandBundleDef, 1> BundleList;
  getBundlesForFunclet(Callee, CurrentFuncletPad, BundleList);

  llvm::CallSite CS;
  if (!InvokeDest) {
    CS = Builder.CreateCall(Callee, IRCallArgs, BundleList);
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, IRCallArgs,
                              BundleList);
    EmitBlock(Cont);
  }
  if (callOrInvoke)
    *callOrInvoke = CS.getInstruction();
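
  // Illustrative sketch (not from the original source): inside a C++ try
  // block a potentially-throwing call is emitted with an unwind edge,
  //   invoke void @callee() to label %invoke.cont unwind label %lpad
  // whereas a nounwind callee gets a plain
  //   call void @callee()
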
  if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
      !CS.hasFnAttr(llvm::Attribute::NoInline))
    Attrs =
        Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
                           llvm::Attribute::AlwaysInline);

  // Disable inlining inside SEH __try blocks.
  if (isSEHTryScope())
    Attrs =
        Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
                           llvm::Attribute::NoInline);

  CS.setAttributes(Attrs);
  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(CS.getInstruction());

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRgen to discard
  // unreachable code.
  if (CS.doesNotReturn()) {
    if (UnusedReturnSize)
      EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize),
                      SRetPtr.getPointer());

    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters in
    // general are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }
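
  // Illustrative sketch (not from the original source): for a noreturn
  // callee such as abort(), the emitted IR ends the block immediately:
  //   call void @abort() #0   ; #0 includes noreturn
  //   unreachable
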
  llvm::Instruction *CI = CS.getInstruction();
  if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
    CI->setName("call");

  // Emit any writebacks immediately. Arguably this should happen
  // after any return-value munging.
  if (CallArgs.hasWritebacks())
    emitWritebacks(*this, CallArgs);

  // The stack cleanup for inalloca arguments has to run out of the normal
  // lexical order, so deactivate it and run it manually here.
  CallArgs.freeArgumentMemory(*this);

  if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
    const Decl *TargetDecl = CalleeInfo.getCalleeDecl();
    if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
      Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
  }

  RValue Ret = [&] {
    switch (RetAI.getKind()) {
    case ABIArgInfo::InAlloca:
    case ABIArgInfo::Indirect: {
      RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
      if (UnusedReturnSize)
        EmitLifetimeEnd(llvm::ConstantInt::get(Int64Ty, UnusedReturnSize),
                        SRetPtr.getPointer());
      return ret;
    }

    case ABIArgInfo::Ignore:
      // If we are ignoring an argument that had a result, make sure to
      // construct the appropriate return value for our caller.
      return GetUndefRValue(RetTy);

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      llvm::Type *RetIRTy = ConvertType(RetTy);
      if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
        switch (getEvaluationKind(RetTy)) {
        case TEK_Complex: {
          llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
          llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
          return RValue::getComplex(std::make_pair(Real, Imag));
        }
        case TEK_Aggregate: {
          Address DestPtr = ReturnValue.getValue();
          bool DestIsVolatile = ReturnValue.isVolatile();

          if (!DestPtr.isValid()) {
            DestPtr = CreateMemTemp(RetTy, "agg.tmp");
            DestIsVolatile = false;
          }
          BuildAggStore(*this, CI, DestPtr, DestIsVolatile);
          return RValue::getAggregate(DestPtr);
        }
        case TEK_Scalar: {
          // If the value doesn't match, perform a bitcast to coerce it. This
          // can happen due to trivial type mismatches.
          llvm::Value *V = CI;
          if (V->getType() != RetIRTy)
            V = Builder.CreateBitCast(V, RetIRTy);
          return RValue::get(V);
        }
        }
        llvm_unreachable("bad evaluation kind");
      }

      Address DestPtr = ReturnValue.getValue();
      bool DestIsVolatile = ReturnValue.isVolatile();

      if (!DestPtr.isValid()) {
        DestPtr = CreateMemTemp(RetTy, "coerce");
        DestIsVolatile = false;
      }

      // If the value is offset in memory, apply the offset now.
      Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
      CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);

      return convertTempToRValue(DestPtr, RetTy, SourceLocation());
    }
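
    // Illustrative sketch (not from the original source): on x86-64 a
    //   struct Small { int a, b; };
    // return value is coerced to a single i64; the coerced store above
    // writes that i64 into a temporary, which is then reloaded as the
    // aggregate RValue of the call.
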
    case ABIArgInfo::Expand:
      llvm_unreachable("Invalid ABI kind for return argument");
    }

    llvm_unreachable("Unhandled ABIArgInfo::Kind");
  }();

  const Decl *TargetDecl = CalleeInfo.getCalleeDecl();
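
  // Illustrative sketch (not from the original source; declaration is
  // hypothetical): for
  //   void *my_alloc(size_t n) __attribute__((assume_aligned(64)));
  // the block below emits an alignment assumption on the returned pointer,
  // roughly llvm.assume((ptrtoint %ret & 63) == 0).
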
  if (Ret.isScalar() && TargetDecl) {
    if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
      llvm::Value *OffsetValue = nullptr;
      if (const auto *Offset = AA->getOffset())
        OffsetValue = EmitScalarExpr(Offset);

      llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment());
      llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment);
      EmitAlignmentAssumption(Ret.getScalarVal(), AlignmentCI->getZExtValue(),
                              OffsetValue);
    }
  }

  return Ret;
}

/* VarArg handling */
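
// Illustrative sketch (not from the original source): for C code such as
//   int n = va_arg(ap, int);
// EmitVAArg below hands the va_list address and the requested type to the
// target's ABIInfo, which knows the platform's va_list layout.
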
Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
  VAListAddr = VE->isMicrosoftABI()
                 ? EmitMSVAListRef(VE->getSubExpr())
                 : EmitVAListRef(VE->getSubExpr());
  QualType Ty = VE->getType();
  if (VE->isMicrosoftABI())
    return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
  return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
}